Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3120-drm-amdkfd-Remove-dead-code-from-gfx8-gfx9-trap-hand.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3120-drm-amdkfd-Remove-dead-code-from-gfx8-gfx9-trap-hand.patch | 1254
1 file changed, 1254 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3120-drm-amdkfd-Remove-dead-code-from-gfx8-gfx9-trap-hand.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3120-drm-amdkfd-Remove-dead-code-from-gfx8-gfx9-trap-hand.patch
new file mode 100644
index 00000000..1d95615b
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3120-drm-amdkfd-Remove-dead-code-from-gfx8-gfx9-trap-hand.patch
@@ -0,0 +1,1254 @@
+From 230da4ce3763dee028f54f5a280cb2fc025e8bbe Mon Sep 17 00:00:00 2001
+From: Jay Cornwall <jay.cornwall@amd.com>
+Date: Wed, 24 Jul 2019 12:26:08 -0500
+Subject: [PATCH 3120/4256] drm/amdkfd: Remove dead code from gfx8/gfx9 trap
+ handlers
+
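+The gfx8 and gfx9 trap handlers carry many code paths guarded by
+compile-time flags (EMU_RUN_HACK, SIM_RUN_HACK, G8SR_DEBUG_TIMESTAMP,
+G8SR_VGPR_SR_IN_DWX4, SWIZZLE_EN, LDS_DMA_ENABLE and
+USE_MTBUF_INSTEAD_OF_MUBUF) that are hardcoded to values which make
+the guarded branches unreachable. Remove those dead paths together
+with the stale revision-history comment block. No functional change.
+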
+Signed-off-by: Jay Cornwall <jay.cornwall@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ .../drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 395 +-----------------
+ .../drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 375 +----------------
+ 2 files changed, 5 insertions(+), 765 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+index a47f5b933120..b195b7cd8a17 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+@@ -24,78 +24,6 @@
+ * PROJECT=vi ./sp3 cwsr_trap_handler_gfx8.asm -hex tmp.hex
+ */
+
+-/* HW (VI) source code for CWSR trap handler */
+-/* Version 18 + multiple trap handler */
+-
+-// this performance-optimal version was originally from Seven Xu at SRDC
+-
+-// Revison #18 --...
+-/* Rev History
+-** #1. Branch from gc dv. //gfxip/gfx8/main/src/test/suites/block/cs/sr/cs_trap_handler.sp3#1,#50, #51, #52-53(Skip, Already Fixed by PV), #54-56(merged),#57-58(mergerd, skiped-already fixed by PV)
+-** #4. SR Memory Layout:
+-** 1. VGPR-SGPR-HWREG-{LDS}
+-** 2. tba_hi.bits.26 - reconfigured as the first wave in tg bits, for defer Save LDS for a threadgroup.. performance concern..
+-** #5. Update: 1. Accurate g8sr_ts_save_d timestamp
+-** #6. Update: 1. Fix s_barrier usage; 2. VGPR s/r using swizzle buffer?(NoNeed, already matched the swizzle pattern, more investigation)
+-** #7. Update: 1. don't barrier if noLDS
+-** #8. Branch: 1. Branch to ver#0, which is very similar to gc dv version
+-** 2. Fix SQ issue by s_sleep 2
+-** #9. Update: 1. Fix scc restore failed issue, restore wave_status at last
+-** 2. optimize s_buffer save by burst 16sgprs...
+-** #10. Update 1. Optimize restore sgpr by busrt 16 sgprs.
+-** #11. Update 1. Add 2 more timestamp for debug version
+-** #12. Update 1. Add VGPR SR using DWx4, some case improve and some case drop performance
+-** #13. Integ 1. Always use MUBUF for PV trap shader...
+-** #14. Update 1. s_buffer_store soft clause...
+-** #15. Update 1. PERF - sclar write with glc:0/mtype0 to allow L2 combine. perf improvement a lot.
+-** #16. Update 1. PRRF - UNROLL LDS_DMA got 2500cycle save in IP tree
+-** #17. Update 1. FUNC - LDS_DMA has issues while ATC, replace with ds_read/buffer_store for save part[TODO restore part]
+-** 2. PERF - Save LDS before save VGPR to cover LDS save long latency...
+-** #18. Update 1. FUNC - Implicitly estore STATUS.VCCZ, which is not writable by s_setreg_b32
+-** 2. FUNC - Handle non-CWSR traps
+-*/
+-
+-var G8SR_WDMEM_HWREG_OFFSET = 0
+-var G8SR_WDMEM_SGPR_OFFSET = 128 // in bytes
+-
+-// Keep definition same as the app shader, These 2 time stamps are part of the app shader... Should before any Save and after restore.
+-
+-var G8SR_DEBUG_TIMESTAMP = 0
+-var G8SR_DEBUG_TS_SAVE_D_OFFSET = 40*4 // ts_save_d timestamp offset relative to SGPR_SR_memory_offset
+-var s_g8sr_ts_save_s = s[34:35] // save start
+-var s_g8sr_ts_sq_save_msg = s[36:37] // The save shader send SAVEWAVE msg to spi
+-var s_g8sr_ts_spi_wrexec = s[38:39] // the SPI write the sr address to SQ
+-var s_g8sr_ts_save_d = s[40:41] // save end
+-var s_g8sr_ts_restore_s = s[42:43] // restore start
+-var s_g8sr_ts_restore_d = s[44:45] // restore end
+-
+-var G8SR_VGPR_SR_IN_DWX4 = 0
+-var G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 = 0x00100000 // DWx4 stride is 4*4Bytes
+-var G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 = G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4
+-
+-
+-/*************************************************************************/
+-/* control on how to run the shader */
+-/*************************************************************************/
+-//any hack that needs to be made to run this code in EMU (either because various EMU code are not ready or no compute save & restore in EMU run)
+-var EMU_RUN_HACK = 0
+-var EMU_RUN_HACK_RESTORE_NORMAL = 0
+-var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0
+-var EMU_RUN_HACK_SAVE_SINGLE_WAVE = 0
+-var EMU_RUN_HACK_SAVE_FIRST_TIME = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK
+-var EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_LO = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK
+-var EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_HI = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK
+-var SAVE_LDS = 1
+-var WG_BASE_ADDR_LO = 0x9000a000
+-var WG_BASE_ADDR_HI = 0x0
+-var WAVE_SPACE = 0x5000 //memory size that each wave occupies in workgroup state mem
+-var CTX_SAVE_CONTROL = 0x0
+-var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL
+-var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either because various RTL code are not ready or no compute save & restore in RTL run)
+-var SGPR_SAVE_USE_SQC = 1 //use SQC D$ to do the write
+-var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //because TC EMU currently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
+-var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing
+-
+ /**************************************************************************/
+ /* variables */
+ /**************************************************************************/
+@@ -226,16 +154,7 @@ shader main
+ type(CS)
+
+
+- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) //hack to use trap_id for determining save/restore
+- //FIXME VCCZ un-init assertion s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
+- s_and_b32 s_save_tmp, s_save_pc_hi, 0xffff0000 //change SCC
+- s_cmp_eq_u32 s_save_tmp, 0x007e0000 //Save: trap_id = 0x7e. Restore: trap_id = 0x7f.
+- s_cbranch_scc0 L_JUMP_TO_RESTORE //do not need to recover STATUS here since we are going to RESTORE
+- //FIXME s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //need to recover STATUS since we are going to SAVE
+- s_branch L_SKIP_RESTORE //NOT restore, SAVE actually
+- else
+ s_branch L_SKIP_RESTORE //NOT restore. might be a regular trap or save
+- end
+
+ L_JUMP_TO_RESTORE:
+ s_branch L_RESTORE //restore
+@@ -249,7 +168,7 @@ L_SKIP_RESTORE:
+ s_cbranch_scc1 L_SAVE //this is the operation for save
+
+ // ********* Handle non-CWSR traps *******************
+-if (!EMU_RUN_HACK)
++
+ /* read tba and tma for next level trap handler, ttmp4 is used as s_save_status */
+ s_load_dwordx4 [ttmp8,ttmp9,ttmp10, ttmp11], [tma_lo,tma_hi], 0
+ s_waitcnt lgkmcnt(0)
+@@ -268,7 +187,7 @@ L_EXCP_CASE:
+ s_and_b32 ttmp1, ttmp1, 0xFFFF
+ set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC)
+ s_rfe_b64 [ttmp0, ttmp1]
+-end
++
+ // ********* End handling of non-CWSR traps *******************
+
+ /**************************************************************************/
+@@ -276,12 +195,6 @@ end
+ /**************************************************************************/
+
+ L_SAVE:
+-
+-if G8SR_DEBUG_TIMESTAMP
+- s_memrealtime s_g8sr_ts_save_s
+- s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
+-end
+-
+ s_mov_b32 s_save_tmp, 0 //clear saveCtx bit
+ s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit
+
+@@ -303,16 +216,7 @@ end
+ s_mov_b32 s_save_exec_hi, exec_hi
+ s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive
+
+-if G8SR_DEBUG_TIMESTAMP
+- s_memrealtime s_g8sr_ts_sq_save_msg
+- s_waitcnt lgkmcnt(0)
+-end
+-
+- if (EMU_RUN_HACK)
+-
+- else
+ s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC
+- end
+
+ // Set SPI_PRIO=2 to avoid starving instruction fetch in the waves we're waiting for.
+ s_or_b32 s_save_tmp, s_save_status, (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT)
+@@ -321,36 +225,9 @@ end
+ L_SLEEP:
+ s_sleep 0x2 // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause SQ hang, since the 7,8th wave could not get arbit to exec inst, while other waves are stuck into the sleep-loop and waiting for wrexec!=0
+
+- if (EMU_RUN_HACK)
+-
+- else
+ s_cbranch_execz L_SLEEP
+- end
+-
+-if G8SR_DEBUG_TIMESTAMP
+- s_memrealtime s_g8sr_ts_spi_wrexec
+- s_waitcnt lgkmcnt(0)
+-end
+
+ /* setup Resource Contants */
+- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE))
+- //calculate wd_addr using absolute thread id
+- v_readlane_b32 s_save_tmp, v9, 0
+- s_lshr_b32 s_save_tmp, s_save_tmp, 6
+- s_mul_i32 s_save_tmp, s_save_tmp, WAVE_SPACE
+- s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
+- s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
+- s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
+- else
+- end
+- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_SINGLE_WAVE))
+- s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
+- s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
+- s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
+- else
+- end
+-
+-
+ s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo
+ s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
+@@ -383,22 +260,10 @@ end
+
+
+ s_mov_b32 s_save_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
+- if (SWIZZLE_EN)
+- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+- else
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+- end
+
+
+ write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) //M0
+-
+- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME))
+- s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
+- s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
+- s_mov_b32 tba_lo, EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_LO
+- s_mov_b32 tba_hi, EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_HI
+- end
+-
+ write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset) //PC
+ write_hwreg_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset)
+ write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset) //EXEC
+@@ -440,18 +305,8 @@ end
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value)
+
+- if (SGPR_SAVE_USE_SQC)
+ s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 2 //NUM_RECORDS in bytes
+- else
+- s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads)
+- end
+-
+- if (SWIZZLE_EN)
+- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+- else
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+- end
+-
+
+ // backup s_save_buf_rsrc0,1 to s_save_pc_lo/hi, since write_16sgpr_to_mem function will change the rsrc0
+ //s_mov_b64 s_save_pc_lo, s_save_buf_rsrc0
+@@ -490,30 +345,14 @@ end
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+
+- if (SWIZZLE_EN)
+- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+- else
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+- end
+-
+
+ // VGPR Allocated in 4-GPR granularity
+
+-if G8SR_VGPR_SR_IN_DWX4
+- // the const stride for DWx4 is 4*4 bytes
+- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
+-
+- buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+-
+- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes
+-else
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
+-end
+
+
+
+@@ -549,64 +388,10 @@ end
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes()
+
+
+- if (SWIZZLE_EN)
+- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+- else
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+- end
+-
+ s_mov_b32 m0, 0x0 //lds_offset initial value = 0
+
+
+-var LDS_DMA_ENABLE = 0
+-var UNROLL = 0
+-if UNROLL==0 && LDS_DMA_ENABLE==1
+- s_mov_b32 s3, 256*2
+- s_nop 0
+- s_nop 0
+- s_nop 0
+- L_SAVE_LDS_LOOP:
+- //TODO: looks the 2 buffer_store/load clause for s/r will hurt performance.???
+- if (SAVE_LDS) //SPI always alloc LDS space in 128DW granularity
+- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 // first 64DW
+- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW
+- end
+-
+- s_add_u32 m0, m0, s3 //every buffer_store_lds does 256 bytes
+- s_add_u32 s_save_mem_offset, s_save_mem_offset, s3 //mem offset increased by 256 bytes
+- s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
+- s_cbranch_scc1 L_SAVE_LDS_LOOP //LDS save is complete?
+-
+-elsif LDS_DMA_ENABLE==1 && UNROLL==1 // UNROOL , has ichace miss
+- // store from higest LDS address to lowest
+- s_mov_b32 s3, 256*2
+- s_sub_u32 m0, s_save_alloc_size, s3
+- s_add_u32 s_save_mem_offset, s_save_mem_offset, m0
+- s_lshr_b32 s_save_alloc_size, s_save_alloc_size, 9 // how many 128 trunks...
+- s_sub_u32 s_save_alloc_size, 128, s_save_alloc_size // store from higheset addr to lowest
+- s_mul_i32 s_save_alloc_size, s_save_alloc_size, 6*4 // PC offset increment, each LDS save block cost 6*4 Bytes instruction
+- s_add_u32 s_save_alloc_size, s_save_alloc_size, 3*4 //2is the below 2 inst...//s_addc and s_setpc
+- s_nop 0
+- s_nop 0
+- s_nop 0 //pad 3 dw to let LDS_DMA align with 64Bytes
+- s_getpc_b64 s[0:1] // reuse s[0:1], since s[0:1] already saved
+- s_add_u32 s0, s0,s_save_alloc_size
+- s_addc_u32 s1, s1, 0
+- s_setpc_b64 s[0:1]
+-
+-
+- for var i =0; i< 128; i++
+- // be careful to make here a 64Byte aligned address, which could improve performance...
+- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:0 // first 64DW
+- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW
+-
+- if i!=127
+- s_sub_u32 m0, m0, s3 // use a sgpr to shrink 2DW-inst to 1DW inst to improve performance , i.e. pack more LDS_DMA inst to one Cacheline
+- s_sub_u32 s_save_mem_offset, s_save_mem_offset, s3
+- end
+- end
+-
+-else // BUFFER_STORE
+ v_mbcnt_lo_u32_b32 v2, 0xffffffff, 0x0
+ v_mbcnt_hi_u32_b32 v3, 0xffffffff, v2 // tid
+ v_mul_i32_i24 v2, v3, 8 // tid*8
+@@ -628,8 +413,6 @@ L_SAVE_LDS_LOOP_VECTOR:
+ // restore rsrc3
+ s_mov_b32 s_save_buf_rsrc3, s0
+
+-end
+-
+ L_SAVE_LDS_DONE:
+
+
+@@ -647,44 +430,8 @@ L_SAVE_LDS_DONE:
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) //FIXME for GFX, zero is possible
+ s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4)
+- if (SWIZZLE_EN)
+- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+- else
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+- end
+-
+-
+- // VGPR Allocated in 4-GPR granularity
+-
+-if G8SR_VGPR_SR_IN_DWX4
+- // the const stride for DWx4 is 4*4 bytes
+- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
+-
+- s_mov_b32 m0, 4 // skip first 4 VGPRs
+- s_cmp_lt_u32 m0, s_save_alloc_size
+- s_cbranch_scc0 L_SAVE_VGPR_LOOP_END // no more vgprs
+
+- s_set_gpr_idx_on m0, 0x1 // This will change M0
+- s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 // because above inst change m0
+-L_SAVE_VGPR_LOOP:
+- v_mov_b32 v0, v0 // v0 = v[0+m0]
+- v_mov_b32 v1, v1
+- v_mov_b32 v2, v2
+- v_mov_b32 v3, v3
+-
+-
+- buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+- s_add_u32 m0, m0, 4
+- s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4
+- s_cmp_lt_u32 m0, s_save_alloc_size
+- s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete?
+- s_set_gpr_idx_off
+-L_SAVE_VGPR_LOOP_END:
+-
+- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes
+-else
+ // VGPR store using dw burst
+ s_mov_b32 m0, 0x4 //VGPR initial index value =0
+ s_cmp_lt_u32 m0, s_save_alloc_size
+@@ -700,52 +447,18 @@ else
+ v_mov_b32 v2, v2 //v0 = v[0+m0]
+ v_mov_b32 v3, v3 //v0 = v[0+m0]
+
+- if(USE_MTBUF_INSTEAD_OF_MUBUF)
+- tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
+- else
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
+- end
+
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 //every buffer_store_dword does 256 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete?
+ s_set_gpr_idx_off
+-end
+
+ L_SAVE_VGPR_END:
+-
+-
+-
+-
+-
+-
+- /* S_PGM_END_SAVED */ //FIXME graphics ONLY
+- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_NORMAL_EXIT))
+- s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
+- s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
+- s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
+- s_rfe_b64 s_save_pc_lo //Return to the main shader program
+- else
+- end
+-
+-// Save Done timestamp
+-if G8SR_DEBUG_TIMESTAMP
+- s_memrealtime s_g8sr_ts_save_d
+- // SGPR SR memory offset : size(VGPR)
+- get_vgpr_size_bytes(s_save_mem_offset)
+- s_add_u32 s_save_mem_offset, s_save_mem_offset, G8SR_DEBUG_TS_SAVE_D_OFFSET
+- s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
+- // Need reset rsrc2??
+- s_mov_b32 m0, s_save_mem_offset
+- s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+- s_buffer_store_dwordx2 s_g8sr_ts_save_d, s_save_buf_rsrc0, m0 glc:1
+-end
+-
+-
+ s_branch L_END_PGM
+
+
+@@ -756,27 +469,6 @@ end
+
+ L_RESTORE:
+ /* Setup Resource Contants */
+- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
+- //calculate wd_addr using absolute thread id
+- v_readlane_b32 s_restore_tmp, v9, 0
+- s_lshr_b32 s_restore_tmp, s_restore_tmp, 6
+- s_mul_i32 s_restore_tmp, s_restore_tmp, WAVE_SPACE
+- s_add_i32 s_restore_spi_init_lo, s_restore_tmp, WG_BASE_ADDR_LO
+- s_mov_b32 s_restore_spi_init_hi, WG_BASE_ADDR_HI
+- s_and_b32 s_restore_spi_init_hi, s_restore_spi_init_hi, CTX_RESTORE_CONTROL
+- else
+- end
+-
+-if G8SR_DEBUG_TIMESTAMP
+- s_memrealtime s_g8sr_ts_restore_s
+- s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
+- // tma_lo/hi are sgpr 110, 111, which will not used for 112 SGPR allocated case...
+- s_mov_b32 s_restore_pc_lo, s_g8sr_ts_restore_s[0]
+- s_mov_b32 s_restore_pc_hi, s_g8sr_ts_restore_s[1] //backup ts to ttmp0/1, sicne exec will be finally restored..
+-end
+-
+-
+-
+ s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo
+ s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
+@@ -818,18 +510,12 @@ end
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes() //FIXME, Check if offset overflow???
+
+
+- if (SWIZZLE_EN)
+- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+- else
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+- end
+ s_mov_b32 m0, 0x0 //lds_offset initial value = 0
+
+ L_RESTORE_LDS_LOOP:
+- if (SAVE_LDS)
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:256 // second 64DW
+- end
+ s_add_u32 m0, m0, 256*2 // 128 DW
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*2 //mem offset increased by 128DW
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
+@@ -848,40 +534,8 @@ end
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
+ s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4)
+- if (SWIZZLE_EN)
+- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+- else
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+- end
+-
+-if G8SR_VGPR_SR_IN_DWX4
+- get_vgpr_size_bytes(s_restore_mem_offset)
+- s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
+-
+- // the const stride for DWx4 is 4*4 bytes
+- s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+- s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
+-
+- s_mov_b32 m0, s_restore_alloc_size
+- s_set_gpr_idx_on m0, 0x8 // Note.. This will change m0
+-
+-L_RESTORE_VGPR_LOOP:
+- buffer_load_dwordx4 v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
+- s_waitcnt vmcnt(0)
+- s_sub_u32 m0, m0, 4
+- v_mov_b32 v0, v0 // v[0+m0] = v0
+- v_mov_b32 v1, v1
+- v_mov_b32 v2, v2
+- v_mov_b32 v3, v3
+- s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
+- s_cmp_eq_u32 m0, 0x8000
+- s_cbranch_scc0 L_RESTORE_VGPR_LOOP
+- s_set_gpr_idx_off
+-
+- s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+- s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE // const stride to 4*4 bytes
+-
+-else
++
+ // VGPR load using dw burst
+ s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
+@@ -890,14 +544,10 @@ else
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 0x8000 //add 0x8000 since we compare m0 against it later
+
+ L_RESTORE_VGPR_LOOP:
+- if(USE_MTBUF_INSTEAD_OF_MUBUF)
+- tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
+- else
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*3
+- end
+ s_waitcnt vmcnt(0) //ensure data ready
+ v_mov_b32 v0, v0 //v[0+m0] = v0
+ v_mov_b32 v1, v1
+@@ -909,16 +559,10 @@ else
+ s_cbranch_scc1 L_RESTORE_VGPR_LOOP //VGPR restore (except v0) is complete?
+ s_set_gpr_idx_off
+ /* VGPR restore on v0 */
+- if(USE_MTBUF_INSTEAD_OF_MUBUF)
+- tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
+- else
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3
+- end
+-
+-end
+
+ /* restore SGPRs */
+ //////////////////////////////
+@@ -934,16 +578,8 @@ end
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value)
+
+- if (SGPR_SAVE_USE_SQC)
+ s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 2 //NUM_RECORDS in bytes
+- else
+- s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads)
+- end
+- if (SWIZZLE_EN)
+- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+- else
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+- end
+
+ /* If 112 SGPRs ar allocated, 4 sgprs are not used TBA(108,109),TMA(110,111),
+ However, we are safe to restore these 4 SGPRs anyway, since TBA,TMA will later be restored by HWREG
+@@ -972,12 +608,6 @@ end
+ //////////////////////////////
+ L_RESTORE_HWREG:
+
+-
+-if G8SR_DEBUG_TIMESTAMP
+- s_mov_b32 s_g8sr_ts_restore_s[0], s_restore_pc_lo
+- s_mov_b32 s_g8sr_ts_restore_s[1], s_restore_pc_hi
+-end
+-
+ // HWREG SR memory offset : size(VGPR)+size(SGPR)
+ get_vgpr_size_bytes(s_restore_mem_offset)
+ get_sgpr_size_bytes(s_restore_tmp)
+@@ -985,11 +615,7 @@ end
+
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
+- if (SWIZZLE_EN)
+- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+- else
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+- end
+
+ read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset) //M0
+ read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset) //PC
+@@ -1006,16 +632,6 @@ end
+
+ s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS
+
+- //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
+- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
+- s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore)
+- s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over
+- end
+- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_RESTORE_NORMAL))
+- s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 4 //pc[31:0]+4 // save is hack through s_trap but restore is normal
+- s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over
+- end
+-
+ s_mov_b32 m0, s_restore_m0
+ s_mov_b32 exec_lo, s_restore_exec_lo
+ s_mov_b32 exec_hi, s_restore_exec_hi
+@@ -1048,11 +664,6 @@ end
+
+ s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
+
+-if G8SR_DEBUG_TIMESTAMP
+- s_memrealtime s_g8sr_ts_restore_d
+- s_waitcnt lgkmcnt(0)
+-end
+-
+ // s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
+ s_rfe_restore_b64 s_restore_pc_lo, s_restore_m0 // s_restore_m0[0] is used to set STATUS.inst_atc
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+index cee4cfd5182d..75f29d13c90f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+@@ -24,75 +24,6 @@
+ * PROJECT=greenland ./sp3 cwsr_trap_handler_gfx9.asm -hex tmp.hex
+ */
+
+-/* HW (GFX9) source code for CWSR trap handler */
+-/* Version 18 + multiple trap handler */
+-
+-// this performance-optimal version was originally from Seven Xu at SRDC
+-
+-// Revison #18 --...
+-/* Rev History
+-** #1. Branch from gc dv. //gfxip/gfx9/main/src/test/suites/block/cs/sr/cs_trap_handler.sp3#1,#50, #51, #52-53(Skip, Already Fixed by PV), #54-56(merged),#57-58(mergerd, skiped-already fixed by PV)
+-** #4. SR Memory Layout:
+-** 1. VGPR-SGPR-HWREG-{LDS}
+-** 2. tba_hi.bits.26 - reconfigured as the first wave in tg bits, for defer Save LDS for a threadgroup.. performance concern..
+-** #5. Update: 1. Accurate g8sr_ts_save_d timestamp
+-** #6. Update: 1. Fix s_barrier usage; 2. VGPR s/r using swizzle buffer?(NoNeed, already matched the swizzle pattern, more investigation)
+-** #7. Update: 1. don't barrier if noLDS
+-** #8. Branch: 1. Branch to ver#0, which is very similar to gc dv version
+-** 2. Fix SQ issue by s_sleep 2
+-** #9. Update: 1. Fix scc restore failed issue, restore wave_status at last
+-** 2. optimize s_buffer save by burst 16sgprs...
+-** #10. Update 1. Optimize restore sgpr by busrt 16 sgprs.
+-** #11. Update 1. Add 2 more timestamp for debug version
+-** #12. Update 1. Add VGPR SR using DWx4, some case improve and some case drop performance
+-** #13. Integ 1. Always use MUBUF for PV trap shader...
+-** #14. Update 1. s_buffer_store soft clause...
+-** #15. Update 1. PERF - sclar write with glc:0/mtype0 to allow L2 combine. perf improvement a lot.
+-** #16. Update 1. PRRF - UNROLL LDS_DMA got 2500cycle save in IP tree
+-** #17. Update 1. FUNC - LDS_DMA has issues while ATC, replace with ds_read/buffer_store for save part[TODO restore part]
+-** 2. PERF - Save LDS before save VGPR to cover LDS save long latency...
+-** #18. Update 1. FUNC - Implicitly estore STATUS.VCCZ, which is not writable by s_setreg_b32
+-** 2. FUNC - Handle non-CWSR traps
+-*/
+-
+-var G8SR_WDMEM_HWREG_OFFSET = 0
+-var G8SR_WDMEM_SGPR_OFFSET = 128 // in bytes
+-
+-// Keep definition same as the app shader, These 2 time stamps are part of the app shader... Should before any Save and after restore.
+-
+-var G8SR_DEBUG_TIMESTAMP = 0
+-var G8SR_DEBUG_TS_SAVE_D_OFFSET = 40*4 // ts_save_d timestamp offset relative to SGPR_SR_memory_offset
+-var s_g8sr_ts_save_s = s[34:35] // save start
+-var s_g8sr_ts_sq_save_msg = s[36:37] // The save shader send SAVEWAVE msg to spi
+-var s_g8sr_ts_spi_wrexec = s[38:39] // the SPI write the sr address to SQ
+-var s_g8sr_ts_save_d = s[40:41] // save end
+-var s_g8sr_ts_restore_s = s[42:43] // restore start
+-var s_g8sr_ts_restore_d = s[44:45] // restore end
+-
+-var G8SR_VGPR_SR_IN_DWX4 = 0
+-var G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 = 0x00100000 // DWx4 stride is 4*4Bytes
+-var G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 = G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4
+-
+-
+-/*************************************************************************/
+-/* control on how to run the shader */
+-/*************************************************************************/
+-//any hack that needs to be made to run this code in EMU (either because various EMU code are not ready or no compute save & restore in EMU run)
+-var EMU_RUN_HACK = 0
+-var EMU_RUN_HACK_RESTORE_NORMAL = 0
+-var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0
+-var EMU_RUN_HACK_SAVE_SINGLE_WAVE = 0
+-var EMU_RUN_HACK_SAVE_FIRST_TIME = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK
+-var SAVE_LDS = 1
+-var WG_BASE_ADDR_LO = 0x9000a000
+-var WG_BASE_ADDR_HI = 0x0
+-var WAVE_SPACE = 0x5000 //memory size that each wave occupies in workgroup state mem
+-var CTX_SAVE_CONTROL = 0x0
+-var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL
+-var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either because various RTL code are not ready or no compute save & restore in RTL run)
+-var SGPR_SAVE_USE_SQC = 1 //use SQC D$ to do the write
+-var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //because TC EMU currently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
+-var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing
+ var ACK_SQC_STORE = 1 //workaround for suspected SQC store bug causing incorrect stores under concurrency
+ var SAVE_AFTER_XNACK_ERROR = 1 //workaround for TCP store failure after XNACK error when ALLOW_REPLAY=0, for debugger
+ var SINGLE_STEP_MISSED_WORKAROUND = 1 //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
+@@ -238,16 +169,7 @@ shader main
+ type(CS)
+
+
+- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) //hack to use trap_id for determining save/restore
+- //FIXME VCCZ un-init assertion s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
+- s_and_b32 s_save_tmp, s_save_pc_hi, 0xffff0000 //change SCC
+- s_cmp_eq_u32 s_save_tmp, 0x007e0000 //Save: trap_id = 0x7e. Restore: trap_id = 0x7f.
+- s_cbranch_scc0 L_JUMP_TO_RESTORE //do not need to recover STATUS here since we are going to RESTORE
+- //FIXME s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //need to recover STATUS since we are going to SAVE
+- s_branch L_SKIP_RESTORE //NOT restore, SAVE actually
+- else
+ s_branch L_SKIP_RESTORE //NOT restore. might be a regular trap or save
+- end
+
+ L_JUMP_TO_RESTORE:
+ s_branch L_RESTORE //restore
+@@ -278,7 +200,7 @@ end
+ s_cbranch_scc1 L_SAVE //this is the operation for save
+
+ // ********* Handle non-CWSR traps *******************
+-if (!EMU_RUN_HACK)
++
+ // Illegal instruction is a non-maskable exception which blocks context save.
+ // Halt the wavefront and return from the trap.
+ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
+@@ -355,7 +277,7 @@ L_EXCP_CASE:
+ set_status_without_spi_prio(s_save_status, ttmp2)
+
+ s_rfe_b64 [ttmp0, ttmp1]
+-end
++
+ // ********* End handling of non-CWSR traps *******************
+
+ /**************************************************************************/
+@@ -363,12 +285,6 @@ end
+ /**************************************************************************/
+
+ L_SAVE:
+-
+-if G8SR_DEBUG_TIMESTAMP
+- s_memrealtime s_g8sr_ts_save_s
+- s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
+-end
+-
+ s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
+
+ s_mov_b32 s_save_tmp, 0 //clear saveCtx bit
+@@ -390,16 +306,7 @@ end
+ s_mov_b32 s_save_exec_hi, exec_hi
+ s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive
+
+-if G8SR_DEBUG_TIMESTAMP
+- s_memrealtime s_g8sr_ts_sq_save_msg
+- s_waitcnt lgkmcnt(0)
+-end
+-
+- if (EMU_RUN_HACK)
+-
+- else
+ s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC
+- end
+
+ // Set SPI_PRIO=2 to avoid starving instruction fetch in the waves we're waiting for.
+ s_or_b32 s_save_tmp, s_save_status, (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT)
+@@ -408,33 +315,7 @@ end
+ L_SLEEP:
+ s_sleep 0x2 // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause SQ hang, since the 7,8th wave could not get arbit to exec inst, while other waves are stuck into the sleep-loop and waiting for wrexec!=0
+
+- if (EMU_RUN_HACK)
+-
+- else
+ s_cbranch_execz L_SLEEP
+- end
+-
+-if G8SR_DEBUG_TIMESTAMP
+- s_memrealtime s_g8sr_ts_spi_wrexec
+- s_waitcnt lgkmcnt(0)
+-end
+-
+- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE))
+- //calculate wd_addr using absolute thread id
+- v_readlane_b32 s_save_tmp, v9, 0
+- s_lshr_b32 s_save_tmp, s_save_tmp, 6
+- s_mul_i32 s_save_tmp, s_save_tmp, WAVE_SPACE
+- s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
+- s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
+- s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
+- else
+- end
+- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_SINGLE_WAVE))
+- s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
+- s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
+- s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
+- else
+- end
+
+ // Save trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
+ // ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
+@@ -484,20 +365,10 @@ end
+
+
+ s_mov_b32 s_save_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
+- if (SWIZZLE_EN)
+- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+- else
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+- end
+
+
+ write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) //M0
+-
+- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME))
+- s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
+- s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
+- end
+-
+ write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset) //PC
+ write_hwreg_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset)
+ write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset) //EXEC
+@@ -535,17 +406,9 @@ end
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value)
+
+- if (SGPR_SAVE_USE_SQC)
+ s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 2 //NUM_RECORDS in bytes
+- else
+- s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads)
+- end
+
+- if (SWIZZLE_EN)
+- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+- else
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+- end
+
+
+ // backup s_save_buf_rsrc0,1 to s_save_pc_lo/hi, since write_16sgpr_to_mem function will change the rsrc0
+@@ -588,25 +451,11 @@ end
+ s_mov_b32 xnack_mask_lo, 0x0
+ s_mov_b32 xnack_mask_hi, 0x0
+
+- if (SWIZZLE_EN)
+- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+- else
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+- end
+
+
+ // VGPR Allocated in 4-GPR granularity
+
+-if G8SR_VGPR_SR_IN_DWX4
+- // the const stride for DWx4 is 4*4 bytes
+- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
+-
+- buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+-
+- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes
+-else
+ if SAVE_AFTER_XNACK_ERROR
+ check_if_tcp_store_ok()
+ s_cbranch_scc1 L_SAVE_FIRST_VGPRS_WITH_TCP
+@@ -621,7 +470,6 @@ end
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
+-end
+
+
+
+@@ -656,64 +504,11 @@ end
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes()
+
+
+- if (SWIZZLE_EN)
+- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+- else
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+- end
+
+ s_mov_b32 m0, 0x0 //lds_offset initial value = 0
+
+
+-var LDS_DMA_ENABLE = 0
+-var UNROLL = 0
+-if UNROLL==0 && LDS_DMA_ENABLE==1
+- s_mov_b32 s3, 256*2
+- s_nop 0
+- s_nop 0
+- s_nop 0
+- L_SAVE_LDS_LOOP:
+- //TODO: looks the 2 buffer_store/load clause for s/r will hurt performance.???
+- if (SAVE_LDS) //SPI always alloc LDS space in 128DW granularity
+- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 // first 64DW
+- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW
+- end
+-
+- s_add_u32 m0, m0, s3 //every buffer_store_lds does 256 bytes
+- s_add_u32 s_save_mem_offset, s_save_mem_offset, s3 //mem offset increased by 256 bytes
+- s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
+- s_cbranch_scc1 L_SAVE_LDS_LOOP //LDS save is complete?
+-
+-elsif LDS_DMA_ENABLE==1 && UNROLL==1 // UNROOL , has ichace miss
+- // store from higest LDS address to lowest
+- s_mov_b32 s3, 256*2
+- s_sub_u32 m0, s_save_alloc_size, s3
+- s_add_u32 s_save_mem_offset, s_save_mem_offset, m0
+- s_lshr_b32 s_save_alloc_size, s_save_alloc_size, 9 // how many 128 trunks...
+- s_sub_u32 s_save_alloc_size, 128, s_save_alloc_size // store from higheset addr to lowest
+- s_mul_i32 s_save_alloc_size, s_save_alloc_size, 6*4 // PC offset increment, each LDS save block cost 6*4 Bytes instruction
+- s_add_u32 s_save_alloc_size, s_save_alloc_size, 3*4 //2is the below 2 inst...//s_addc and s_setpc
+- s_nop 0
+- s_nop 0
+- s_nop 0 //pad 3 dw to let LDS_DMA align with 64Bytes
+- s_getpc_b64 s[0:1] // reuse s[0:1], since s[0:1] already saved
+- s_add_u32 s0, s0,s_save_alloc_size
+- s_addc_u32 s1, s1, 0
+- s_setpc_b64 s[0:1]
+-
+-
+- for var i =0; i< 128; i++
+- // be careful to make here a 64Byte aligned address, which could improve performance...
+- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:0 // first 64DW
+- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW
+-
+- if i!=127
+- s_sub_u32 m0, m0, s3 // use a sgpr to shrink 2DW-inst to 1DW inst to improve performance , i.e. pack more LDS_DMA inst to one Cacheline
+- s_sub_u32 s_save_mem_offset, s_save_mem_offset, s3
+- end
+- end
+-
+-else // BUFFER_STORE
+ v_mbcnt_lo_u32_b32 v2, 0xffffffff, 0x0
+ v_mbcnt_hi_u32_b32 v3, 0xffffffff, v2 // tid
+
+@@ -757,8 +552,6 @@ L_SAVE_LDS_LOOP_VECTOR:
+ // restore rsrc3
+ s_mov_b32 s_save_buf_rsrc3, s0
+
+-end
+-
+ L_SAVE_LDS_DONE:
+
+
+@@ -776,44 +569,9 @@ L_SAVE_LDS_DONE:
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) //FIXME for GFX, zero is possible
+ s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4)
+- if (SWIZZLE_EN)
+- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+- else
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+- end
+
+
+- // VGPR Allocated in 4-GPR granularity
+-
+-if G8SR_VGPR_SR_IN_DWX4
+- // the const stride for DWx4 is 4*4 bytes
+- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
+-
+- s_mov_b32 m0, 4 // skip first 4 VGPRs
+- s_cmp_lt_u32 m0, s_save_alloc_size
+- s_cbranch_scc0 L_SAVE_VGPR_LOOP_END // no more vgprs
+-
+- s_set_gpr_idx_on m0, 0x1 // This will change M0
+- s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 // because above inst change m0
+-L_SAVE_VGPR_LOOP:
+- v_mov_b32 v0, v0 // v0 = v[0+m0]
+- v_mov_b32 v1, v1
+- v_mov_b32 v2, v2
+- v_mov_b32 v3, v3
+-
+-
+- buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+- s_add_u32 m0, m0, 4
+- s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4
+- s_cmp_lt_u32 m0, s_save_alloc_size
+- s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete?
+- s_set_gpr_idx_off
+-L_SAVE_VGPR_LOOP_END:
+-
+- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes
+-else
+ // VGPR store using dw burst
+ s_mov_b32 m0, 0x4 //VGPR initial index value =0
+ s_cmp_lt_u32 m0, s_save_alloc_size
+@@ -844,21 +602,16 @@ end
+ v_mov_b32 v2, v2 //v0 = v[0+m0]
+ v_mov_b32 v3, v3 //v0 = v[0+m0]
+
+- if(USE_MTBUF_INSTEAD_OF_MUBUF)
+- tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
+- else
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
+- end
+
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 //every buffer_store_dword does 256 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete?
+ s_set_gpr_idx_off
+-end
+
+ L_SAVE_VGPR_END:
+
+@@ -905,29 +658,6 @@ L_SAVE_ACCVGPR_LOOP:
+ L_SAVE_ACCVGPR_END:
+ end
+
+- /* S_PGM_END_SAVED */ //FIXME graphics ONLY
+- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_NORMAL_EXIT))
+- s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
+- s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
+- s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
+- s_rfe_b64 s_save_pc_lo //Return to the main shader program
+- else
+- end
+-
+-// Save Done timestamp
+-if G8SR_DEBUG_TIMESTAMP
+- s_memrealtime s_g8sr_ts_save_d
+- // SGPR SR memory offset : size(VGPR)
+- get_vgpr_size_bytes(s_save_mem_offset)
+- s_add_u32 s_save_mem_offset, s_save_mem_offset, G8SR_DEBUG_TS_SAVE_D_OFFSET
+- s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
+- // Need reset rsrc2??
+- s_mov_b32 m0, s_save_mem_offset
+- s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+- s_buffer_store_dwordx2 s_g8sr_ts_save_d, s_save_buf_rsrc0, m0 glc:1
+-end
+-
+-
+ s_branch L_END_PGM
+
+
+@@ -938,27 +668,6 @@ end
+
+ L_RESTORE:
+ /* Setup Resource Contants */
+- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
+- //calculate wd_addr using absolute thread id
+- v_readlane_b32 s_restore_tmp, v9, 0
+- s_lshr_b32 s_restore_tmp, s_restore_tmp, 6
+- s_mul_i32 s_restore_tmp, s_restore_tmp, WAVE_SPACE
+- s_add_i32 s_restore_spi_init_lo, s_restore_tmp, WG_BASE_ADDR_LO
+- s_mov_b32 s_restore_spi_init_hi, WG_BASE_ADDR_HI
+- s_and_b32 s_restore_spi_init_hi, s_restore_spi_init_hi, CTX_RESTORE_CONTROL
+- else
+- end
+-
+-if G8SR_DEBUG_TIMESTAMP
+- s_memrealtime s_g8sr_ts_restore_s
+- s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
+- // tma_lo/hi are sgpr 110, 111, which will not used for 112 SGPR allocated case...
+- s_mov_b32 s_restore_pc_lo, s_g8sr_ts_restore_s[0]
+- s_mov_b32 s_restore_pc_hi, s_g8sr_ts_restore_s[1] //backup ts to ttmp0/1, sicne exec will be finally restored..
+-end
+-
+-
+-
+ s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo
+ s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
+@@ -1000,18 +709,12 @@ end
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes() //FIXME, Check if offset overflow???
+
+
+- if (SWIZZLE_EN)
+- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+- else
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+- end
+ s_mov_b32 m0, 0x0 //lds_offset initial value = 0
+
+ L_RESTORE_LDS_LOOP:
+- if (SAVE_LDS)
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:256 // second 64DW
+- end
+ s_add_u32 m0, m0, 256*2 // 128 DW
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*2 //mem offset increased by 128DW
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
+@@ -1035,40 +738,8 @@ if ASIC_TARGET_ARCTURUS
+ s_mov_b32 s_restore_accvgpr_offset, s_restore_buf_rsrc2 //ACC VGPRs at end of VGPRs
+ end
+
+- if (SWIZZLE_EN)
+- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+- else
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+- end
+
+-if G8SR_VGPR_SR_IN_DWX4
+- get_vgpr_size_bytes(s_restore_mem_offset)
+- s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
+-
+- // the const stride for DWx4 is 4*4 bytes
+- s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+- s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
+-
+- s_mov_b32 m0, s_restore_alloc_size
+- s_set_gpr_idx_on m0, 0x8 // Note.. This will change m0
+-
+-L_RESTORE_VGPR_LOOP:
+- buffer_load_dwordx4 v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
+- s_waitcnt vmcnt(0)
+- s_sub_u32 m0, m0, 4
+- v_mov_b32 v0, v0 // v[0+m0] = v0
+- v_mov_b32 v1, v1
+- v_mov_b32 v2, v2
+- v_mov_b32 v3, v3
+- s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
+- s_cmp_eq_u32 m0, 0x8000
+- s_cbranch_scc0 L_RESTORE_VGPR_LOOP
+- s_set_gpr_idx_off
+-
+- s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+- s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE // const stride to 4*4 bytes
+-
+-else
+ // VGPR load using dw burst
+ s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
+@@ -1081,9 +752,6 @@ end
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 0x8000 //add 0x8000 since we compare m0 against it later
+
+ L_RESTORE_VGPR_LOOP:
+- if(USE_MTBUF_INSTEAD_OF_MUBUF)
+- tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
+- else
+
+ if ASIC_TARGET_ARCTURUS
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset slc:1 glc:1
+@@ -1102,7 +770,6 @@ end
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*3
+- end
+ s_waitcnt vmcnt(0) //ensure data ready
+ v_mov_b32 v0, v0 //v[0+m0] = v0
+ v_mov_b32 v1, v1
+@@ -1126,16 +793,10 @@ if ASIC_TARGET_ARCTURUS
+ end
+ end
+
+- if(USE_MTBUF_INSTEAD_OF_MUBUF)
+- tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
+- else
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3
+- end
+-
+-end
+
+ /* restore SGPRs */
+ //////////////////////////////
+@@ -1151,16 +812,8 @@ end
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value)
+
+- if (SGPR_SAVE_USE_SQC)
+ s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 2 //NUM_RECORDS in bytes
+- else
+- s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads)
+- end
+- if (SWIZZLE_EN)
+- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+- else
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+- end
+
+ s_mov_b32 m0, s_restore_alloc_size
+
+@@ -1188,11 +841,6 @@ end
+ L_RESTORE_HWREG:
+
+
+-if G8SR_DEBUG_TIMESTAMP
+- s_mov_b32 s_g8sr_ts_restore_s[0], s_restore_pc_lo
+- s_mov_b32 s_g8sr_ts_restore_s[1], s_restore_pc_hi
+-end
+-
+ // HWREG SR memory offset : size(VGPR)+size(SGPR)
+ get_vgpr_size_bytes(s_restore_mem_offset)
+ get_sgpr_size_bytes(s_restore_tmp)
+@@ -1200,11 +848,7 @@ end
+
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
+- if (SWIZZLE_EN)
+- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+- else
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+- end
+
+ read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset) //M0
+ read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset) //PC
+@@ -1219,16 +863,6 @@ end
+
+ s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS
+
+- //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
+- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
+- s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore)
+- s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over
+- end
+- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_RESTORE_NORMAL))
+- s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 4 //pc[31:0]+4 // save is hack through s_trap but restore is normal
+- s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over
+- end
+-
+ s_mov_b32 m0, s_restore_m0
+ s_mov_b32 exec_lo, s_restore_exec_lo
+ s_mov_b32 exec_hi, s_restore_exec_hi
+@@ -1275,11 +909,6 @@ end
+
+ s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
+
+-if G8SR_DEBUG_TIMESTAMP
+- s_memrealtime s_g8sr_ts_restore_d
+- s_waitcnt lgkmcnt(0)
+-end
+-
+ // s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
+ s_rfe_restore_b64 s_restore_pc_lo, s_restore_m0 // s_restore_m0[0] is used to set STATUS.inst_atc
+
+--
+2.17.1
+