 1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/pci.h>
25#include <linux/slab.h>
26
27#include "amdgpu.h"
28#include "amdgpu_atombios.h"
29#include "amdgpu_ih.h"
30#include "amdgpu_uvd.h"
31#include "amdgpu_vce.h"
32#include "amdgpu_ucode.h"
33#include "atom.h"
34#include "amd_pcie.h"
35
36#include "gmc/gmc_8_1_d.h"
37#include "gmc/gmc_8_1_sh_mask.h"
38
39#include "oss/oss_3_0_d.h"
40#include "oss/oss_3_0_sh_mask.h"
41
42#include "bif/bif_5_0_d.h"
43#include "bif/bif_5_0_sh_mask.h"
44
45#include "gca/gfx_8_0_d.h"
46#include "gca/gfx_8_0_sh_mask.h"
47
48#include "smu/smu_7_1_1_d.h"
49#include "smu/smu_7_1_1_sh_mask.h"
50
51#include "uvd/uvd_5_0_d.h"
52#include "uvd/uvd_5_0_sh_mask.h"
53
54#include "vce/vce_3_0_d.h"
55#include "vce/vce_3_0_sh_mask.h"
56
57#include "dce/dce_10_0_d.h"
58#include "dce/dce_10_0_sh_mask.h"
59
60#include "vid.h"
61#include "vi.h"
62#include "gmc_v8_0.h"
63#include "gmc_v7_0.h"
64#include "gfx_v8_0.h"
65#include "sdma_v2_4.h"
66#include "sdma_v3_0.h"
67#include "dce_v10_0.h"
68#include "dce_v11_0.h"
69#include "iceland_ih.h"
70#include "tonga_ih.h"
71#include "cz_ih.h"
72#include "uvd_v5_0.h"
73#include "uvd_v6_0.h"
74#include "vce_v3_0.h"
75#if defined(CONFIG_DRM_AMD_ACP)
76#include "amdgpu_acp.h"
77#endif
78#include "dce_virtual.h"
79#include "mxgpu_vi.h"
80#include "amdgpu_dm.h"
81
82/*
 83 * Indirect register accessors
84 */
85static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
86{
87 unsigned long flags;
88 u32 r;
89
90 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
91 WREG32_NO_KIQ(mmPCIE_INDEX, reg);
92 (void)RREG32_NO_KIQ(mmPCIE_INDEX);
93 r = RREG32_NO_KIQ(mmPCIE_DATA);
94 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
95 return r;
96}
97
98static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
99{
100 unsigned long flags;
101
102 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
103 WREG32_NO_KIQ(mmPCIE_INDEX, reg);
104 (void)RREG32_NO_KIQ(mmPCIE_INDEX);
105 WREG32_NO_KIQ(mmPCIE_DATA, v);
106 (void)RREG32_NO_KIQ(mmPCIE_DATA);
107 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
108}
109
110static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
111{
112 unsigned long flags;
113 u32 r;
114
115 spin_lock_irqsave(&adev->smc_idx_lock, flags);
116 WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
117 r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
118 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
119 return r;
120}
121
122static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
123{
124 unsigned long flags;
125
126 spin_lock_irqsave(&adev->smc_idx_lock, flags);
127 WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
128 WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
129 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
130}
131
132/* smu_8_0_d.h */
133#define mmMP0PUB_IND_INDEX 0x180
134#define mmMP0PUB_IND_DATA 0x181
135
136static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
137{
138 unsigned long flags;
139 u32 r;
140
141 spin_lock_irqsave(&adev->smc_idx_lock, flags);
142 WREG32(mmMP0PUB_IND_INDEX, (reg));
143 r = RREG32(mmMP0PUB_IND_DATA);
144 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
145 return r;
146}
147
148static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
149{
150 unsigned long flags;
151
152 spin_lock_irqsave(&adev->smc_idx_lock, flags);
153 WREG32(mmMP0PUB_IND_INDEX, (reg));
154 WREG32(mmMP0PUB_IND_DATA, (v));
155 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
156}
157
158static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
159{
160 unsigned long flags;
161 u32 r;
162
163 spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
164 WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
165 r = RREG32(mmUVD_CTX_DATA);
166 spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
167 return r;
168}
169
170static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
171{
172 unsigned long flags;
173
174 spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
175 WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
176 WREG32(mmUVD_CTX_DATA, (v));
177 spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
178}
179
180static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
181{
182 unsigned long flags;
183 u32 r;
184
185 spin_lock_irqsave(&adev->didt_idx_lock, flags);
186 WREG32(mmDIDT_IND_INDEX, (reg));
187 r = RREG32(mmDIDT_IND_DATA);
188 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
189 return r;
190}
191
192static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
193{
194 unsigned long flags;
195
196 spin_lock_irqsave(&adev->didt_idx_lock, flags);
197 WREG32(mmDIDT_IND_INDEX, (reg));
198 WREG32(mmDIDT_IND_DATA, (v));
199 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
200}
201
202static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
203{
204 unsigned long flags;
205 u32 r;
206
207 spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
208 WREG32(mmGC_CAC_IND_INDEX, (reg));
209 r = RREG32(mmGC_CAC_IND_DATA);
210 spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
211 return r;
212}
213
214static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
215{
216 unsigned long flags;
217
218 spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
219 WREG32(mmGC_CAC_IND_INDEX, (reg));
220 WREG32(mmGC_CAC_IND_DATA, (v));
221 spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
222}
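/*
 * Usage sketch (illustrative, not part of the driver): these accessors are
 * installed as adev callbacks in vi_common_early_init() and are normally
 * reached through the generic indirect-register macros, e.g.:
 *
 *	u32 val = RREG32_PCIE(ixPCIE_CNTL2);	(dispatches to adev->pcie_rreg)
 *	WREG32_SMC(ixCG_CLKPIN_CNTL, val);	(dispatches to adev->smc_wreg)
 *
 * Each accessor holds the matching index/data spinlock so the two-step
 * (index write, then data access) sequence cannot be interleaved by another
 * CPU using the same indirect port.
 */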
223
224
225static const u32 tonga_mgcg_cgcg_init[] =
226{
227 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
228 mmPCIE_INDEX, 0xffffffff, 0x0140001c,
229 mmPCIE_DATA, 0x000f0000, 0x00000000,
230 mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
231 mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
232 mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
233 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
234};
235
236static const u32 fiji_mgcg_cgcg_init[] =
237{
238 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
239 mmPCIE_INDEX, 0xffffffff, 0x0140001c,
240 mmPCIE_DATA, 0x000f0000, 0x00000000,
241 mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
242 mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
243 mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
244 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
245};
246
247static const u32 iceland_mgcg_cgcg_init[] =
248{
249 mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
250 mmPCIE_DATA, 0x000f0000, 0x00000000,
251 mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
252 mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
253 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
254};
255
256static const u32 cz_mgcg_cgcg_init[] =
257{
258 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
259 mmPCIE_INDEX, 0xffffffff, 0x0140001c,
260 mmPCIE_DATA, 0x000f0000, 0x00000000,
261 mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
262 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
263};
264
265static const u32 stoney_mgcg_cgcg_init[] =
266{
267 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
268 mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
269 mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
270};
271
272static void vi_init_golden_registers(struct amdgpu_device *adev)
273{
274 /* Some of the registers might be dependent on GRBM_GFX_INDEX */
275 mutex_lock(&adev->grbm_idx_mutex);
276
277 if (amdgpu_sriov_vf(adev)) {
278 xgpu_vi_init_golden_registers(adev);
279 mutex_unlock(&adev->grbm_idx_mutex);
280 return;
281 }
282
283 switch (adev->asic_type) {
284 case CHIP_TOPAZ:
285 amdgpu_device_program_register_sequence(adev,
286 iceland_mgcg_cgcg_init,
287 ARRAY_SIZE(iceland_mgcg_cgcg_init));
288 break;
289 case CHIP_FIJI:
290 amdgpu_device_program_register_sequence(adev,
291 fiji_mgcg_cgcg_init,
292 ARRAY_SIZE(fiji_mgcg_cgcg_init));
293 break;
294 case CHIP_TONGA:
295 amdgpu_device_program_register_sequence(adev,
296 tonga_mgcg_cgcg_init,
297 ARRAY_SIZE(tonga_mgcg_cgcg_init));
298 break;
299 case CHIP_CARRIZO:
300 amdgpu_device_program_register_sequence(adev,
301 cz_mgcg_cgcg_init,
302 ARRAY_SIZE(cz_mgcg_cgcg_init));
303 break;
304 case CHIP_STONEY:
305 amdgpu_device_program_register_sequence(adev,
306 stoney_mgcg_cgcg_init,
307 ARRAY_SIZE(stoney_mgcg_cgcg_init));
308 break;
309 case CHIP_POLARIS10:
310 case CHIP_POLARIS11:
311 case CHIP_POLARIS12:
312 case CHIP_VEGAM:
313 default:
314 break;
315 }
316 mutex_unlock(&adev->grbm_idx_mutex);
317}
318
319/**
320 * vi_get_xclk - get the xclk
321 *
322 * @adev: amdgpu_device pointer
323 *
324 * Returns the reference clock used by the gfx engine
325 * (VI).
326 */
327static u32 vi_get_xclk(struct amdgpu_device *adev)
328{
329 u32 reference_clock = adev->clock.spll.reference_freq;
330 u32 tmp;
331
332 if (adev->flags & AMD_IS_APU) {
333 switch (adev->asic_type) {
334 case CHIP_STONEY:
335			/* vbios says 48 MHz, but the actual freq is 100 MHz */
336 return 10000;
337 default:
338 return reference_clock;
339 }
340 }
341
342 tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
343 if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
344 return 1000;
345
346 tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
347 if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
348 return reference_clock / 4;
349
350 return reference_clock;
351}
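/*
 * Informal note: clock values here appear to follow the usual amdgpu
 * convention of 10 kHz units, so the Stoney special case above
 * (return 10000) corresponds to 100 MHz, and e.g. a reference_freq of
 * 2500 would correspond to 25 MHz.
 */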
352
353/**
354 * vi_srbm_select - select specific register instances
355 *
356 * @adev: amdgpu_device pointer
357 * @me: selected ME (micro engine)
358 * @pipe: pipe
359 * @queue: queue
360 * @vmid: VMID
361 *
 362 * Switches the currently active register instances. Some
363 * registers are instanced per VMID, others are instanced per
364 * me/pipe/queue combination.
365 */
366void vi_srbm_select(struct amdgpu_device *adev,
367 u32 me, u32 pipe, u32 queue, u32 vmid)
368{
369 u32 srbm_gfx_cntl = 0;
370 srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
371 srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
372 srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
373 srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
374 WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
375}
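/*
 * Usage sketch (illustrative only): callers typically select an instance,
 * program the instanced registers, then restore the default selection, all
 * under adev->srbm_mutex, e.g.:
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	vi_srbm_select(adev, me, pipe, queue, vmid);
 *	... program per-queue / per-VMID registers ...
 *	vi_srbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */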
376
377static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
378{
379 /* todo */
380}
381
382static bool vi_read_disabled_bios(struct amdgpu_device *adev)
383{
384 u32 bus_cntl;
385 u32 d1vga_control = 0;
386 u32 d2vga_control = 0;
387 u32 vga_render_control = 0;
388 u32 rom_cntl;
389 bool r;
390
391 bus_cntl = RREG32(mmBUS_CNTL);
392 if (adev->mode_info.num_crtc) {
393 d1vga_control = RREG32(mmD1VGA_CONTROL);
394 d2vga_control = RREG32(mmD2VGA_CONTROL);
395 vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
396 }
397 rom_cntl = RREG32_SMC(ixROM_CNTL);
398
399 /* enable the rom */
400 WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
401 if (adev->mode_info.num_crtc) {
402 /* Disable VGA mode */
403 WREG32(mmD1VGA_CONTROL,
404 (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
405 D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
406 WREG32(mmD2VGA_CONTROL,
407 (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
408 D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
409 WREG32(mmVGA_RENDER_CONTROL,
410 (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
411 }
412 WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);
413
414 r = amdgpu_read_bios(adev);
415
416 /* restore regs */
417 WREG32(mmBUS_CNTL, bus_cntl);
418 if (adev->mode_info.num_crtc) {
419 WREG32(mmD1VGA_CONTROL, d1vga_control);
420 WREG32(mmD2VGA_CONTROL, d2vga_control);
421 WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
422 }
423 WREG32_SMC(ixROM_CNTL, rom_cntl);
424 return r;
425}
426
427static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
428 u8 *bios, u32 length_bytes)
429{
430 u32 *dw_ptr;
431 unsigned long flags;
432 u32 i, length_dw;
433
434 if (bios == NULL)
435 return false;
436 if (length_bytes == 0)
437 return false;
 438 /* the APU vbios image is part of the system BIOS image */
439 if (adev->flags & AMD_IS_APU)
440 return false;
441
442 dw_ptr = (u32 *)bios;
443 length_dw = ALIGN(length_bytes, 4) / 4;
444 /* take the smc lock since we are using the smc index */
445 spin_lock_irqsave(&adev->smc_idx_lock, flags);
446 /* set rom index to 0 */
447 WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
448 WREG32(mmSMC_IND_DATA_11, 0);
449	/* set index to data for continuous read */
450 WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
451 for (i = 0; i < length_dw; i++)
452 dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
453 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
454
455 return true;
456}
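/*
 * Informal note: the ROM_DATA port presumably auto-increments the ROM index
 * on every read, which is what makes the "continuous read" loop above work.
 * As a worked example, fetching a 62-byte header would perform
 * ALIGN(62, 4) / 4 = 16 successive reads of mmSMC_IND_DATA_11.
 */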
457
458static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
459{
460 uint32_t reg = 0;
461
462 if (adev->asic_type == CHIP_TONGA ||
463 adev->asic_type == CHIP_FIJI) {
464 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
465 /* bit0: 0 means pf and 1 means vf */
466 if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
467 adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
468 /* bit31: 0 means disable IOV and 1 means enable */
469 if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
470 adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
471 }
472
473 if (reg == 0) {
 474 if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
475 adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
476 }
477}
478
479static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
480 {mmGRBM_STATUS},
481 {mmGRBM_STATUS2},
482 {mmGRBM_STATUS_SE0},
483 {mmGRBM_STATUS_SE1},
484 {mmGRBM_STATUS_SE2},
485 {mmGRBM_STATUS_SE3},
486 {mmSRBM_STATUS},
487 {mmSRBM_STATUS2},
488 {mmSRBM_STATUS3},
489 {mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
490 {mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
491 {mmCP_STAT},
492 {mmCP_STALLED_STAT1},
493 {mmCP_STALLED_STAT2},
494 {mmCP_STALLED_STAT3},
495 {mmCP_CPF_BUSY_STAT},
496 {mmCP_CPF_STALLED_STAT1},
497 {mmCP_CPF_STATUS},
498 {mmCP_CPC_BUSY_STAT},
499 {mmCP_CPC_STALLED_STAT1},
500 {mmCP_CPC_STATUS},
501 {mmGB_ADDR_CONFIG},
502 {mmMC_ARB_RAMCFG},
503 {mmGB_TILE_MODE0},
504 {mmGB_TILE_MODE1},
505 {mmGB_TILE_MODE2},
506 {mmGB_TILE_MODE3},
507 {mmGB_TILE_MODE4},
508 {mmGB_TILE_MODE5},
509 {mmGB_TILE_MODE6},
510 {mmGB_TILE_MODE7},
511 {mmGB_TILE_MODE8},
512 {mmGB_TILE_MODE9},
513 {mmGB_TILE_MODE10},
514 {mmGB_TILE_MODE11},
515 {mmGB_TILE_MODE12},
516 {mmGB_TILE_MODE13},
517 {mmGB_TILE_MODE14},
518 {mmGB_TILE_MODE15},
519 {mmGB_TILE_MODE16},
520 {mmGB_TILE_MODE17},
521 {mmGB_TILE_MODE18},
522 {mmGB_TILE_MODE19},
523 {mmGB_TILE_MODE20},
524 {mmGB_TILE_MODE21},
525 {mmGB_TILE_MODE22},
526 {mmGB_TILE_MODE23},
527 {mmGB_TILE_MODE24},
528 {mmGB_TILE_MODE25},
529 {mmGB_TILE_MODE26},
530 {mmGB_TILE_MODE27},
531 {mmGB_TILE_MODE28},
532 {mmGB_TILE_MODE29},
533 {mmGB_TILE_MODE30},
534 {mmGB_TILE_MODE31},
535 {mmGB_MACROTILE_MODE0},
536 {mmGB_MACROTILE_MODE1},
537 {mmGB_MACROTILE_MODE2},
538 {mmGB_MACROTILE_MODE3},
539 {mmGB_MACROTILE_MODE4},
540 {mmGB_MACROTILE_MODE5},
541 {mmGB_MACROTILE_MODE6},
542 {mmGB_MACROTILE_MODE7},
543 {mmGB_MACROTILE_MODE8},
544 {mmGB_MACROTILE_MODE9},
545 {mmGB_MACROTILE_MODE10},
546 {mmGB_MACROTILE_MODE11},
547 {mmGB_MACROTILE_MODE12},
548 {mmGB_MACROTILE_MODE13},
549 {mmGB_MACROTILE_MODE14},
550 {mmGB_MACROTILE_MODE15},
551 {mmCC_RB_BACKEND_DISABLE, true},
552 {mmGC_USER_RB_BACKEND_DISABLE, true},
553 {mmGB_BACKEND_MAP, false},
554 {mmPA_SC_RASTER_CONFIG, true},
555 {mmPA_SC_RASTER_CONFIG_1, true},
556};
557
558static uint32_t vi_get_register_value(struct amdgpu_device *adev,
559 bool indexed, u32 se_num,
560 u32 sh_num, u32 reg_offset)
561{
562 if (indexed) {
563 uint32_t val;
564 unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
565 unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
566
567 switch (reg_offset) {
568 case mmCC_RB_BACKEND_DISABLE:
569 return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
570 case mmGC_USER_RB_BACKEND_DISABLE:
571 return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
572 case mmPA_SC_RASTER_CONFIG:
573 return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
574 case mmPA_SC_RASTER_CONFIG_1:
575 return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
576 }
577
578 mutex_lock(&adev->grbm_idx_mutex);
579 if (se_num != 0xffffffff || sh_num != 0xffffffff)
580 amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
581
582 val = RREG32(reg_offset);
583
584 if (se_num != 0xffffffff || sh_num != 0xffffffff)
585 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
586 mutex_unlock(&adev->grbm_idx_mutex);
587 return val;
588 } else {
589 unsigned idx;
590
591 switch (reg_offset) {
592 case mmGB_ADDR_CONFIG:
593 return adev->gfx.config.gb_addr_config;
594 case mmMC_ARB_RAMCFG:
595 return adev->gfx.config.mc_arb_ramcfg;
596 case mmGB_TILE_MODE0:
597 case mmGB_TILE_MODE1:
598 case mmGB_TILE_MODE2:
599 case mmGB_TILE_MODE3:
600 case mmGB_TILE_MODE4:
601 case mmGB_TILE_MODE5:
602 case mmGB_TILE_MODE6:
603 case mmGB_TILE_MODE7:
604 case mmGB_TILE_MODE8:
605 case mmGB_TILE_MODE9:
606 case mmGB_TILE_MODE10:
607 case mmGB_TILE_MODE11:
608 case mmGB_TILE_MODE12:
609 case mmGB_TILE_MODE13:
610 case mmGB_TILE_MODE14:
611 case mmGB_TILE_MODE15:
612 case mmGB_TILE_MODE16:
613 case mmGB_TILE_MODE17:
614 case mmGB_TILE_MODE18:
615 case mmGB_TILE_MODE19:
616 case mmGB_TILE_MODE20:
617 case mmGB_TILE_MODE21:
618 case mmGB_TILE_MODE22:
619 case mmGB_TILE_MODE23:
620 case mmGB_TILE_MODE24:
621 case mmGB_TILE_MODE25:
622 case mmGB_TILE_MODE26:
623 case mmGB_TILE_MODE27:
624 case mmGB_TILE_MODE28:
625 case mmGB_TILE_MODE29:
626 case mmGB_TILE_MODE30:
627 case mmGB_TILE_MODE31:
628 idx = (reg_offset - mmGB_TILE_MODE0);
629 return adev->gfx.config.tile_mode_array[idx];
630 case mmGB_MACROTILE_MODE0:
631 case mmGB_MACROTILE_MODE1:
632 case mmGB_MACROTILE_MODE2:
633 case mmGB_MACROTILE_MODE3:
634 case mmGB_MACROTILE_MODE4:
635 case mmGB_MACROTILE_MODE5:
636 case mmGB_MACROTILE_MODE6:
637 case mmGB_MACROTILE_MODE7:
638 case mmGB_MACROTILE_MODE8:
639 case mmGB_MACROTILE_MODE9:
640 case mmGB_MACROTILE_MODE10:
641 case mmGB_MACROTILE_MODE11:
642 case mmGB_MACROTILE_MODE12:
643 case mmGB_MACROTILE_MODE13:
644 case mmGB_MACROTILE_MODE14:
645 case mmGB_MACROTILE_MODE15:
646 idx = (reg_offset - mmGB_MACROTILE_MODE0);
647 return adev->gfx.config.macrotile_mode_array[idx];
648 default:
649 return RREG32(reg_offset);
650 }
651 }
652}
653
654static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
655 u32 sh_num, u32 reg_offset, u32 *value)
656{
657 uint32_t i;
658
659 *value = 0;
660 for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
661 bool indexed = vi_allowed_read_registers[i].grbm_indexed;
662
663 if (reg_offset != vi_allowed_read_registers[i].reg_offset)
664 continue;
665
666 *value = vi_get_register_value(adev, indexed, se_num, sh_num,
667 reg_offset);
668 return 0;
669 }
670 return -EINVAL;
671}
672
673static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
674{
675 u32 i;
676
677 dev_info(adev->dev, "GPU pci config reset\n");
678
679 /* disable BM */
680 pci_clear_master(adev->pdev);
681 /* reset */
682 amdgpu_device_pci_config_reset(adev);
683
684 udelay(100);
685
686 /* wait for asic to come out of reset */
687 for (i = 0; i < adev->usec_timeout; i++) {
688 if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
689 /* enable BM */
690 pci_set_master(adev->pdev);
691 adev->has_hw_reset = true;
692 return 0;
693 }
694 udelay(1);
695 }
696 return -EINVAL;
697}
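/*
 * Informal note: the loop above polls mmCONFIG_MEMSIZE (which reads back as
 * 0xffffffff while the ASIC is still in reset) once per microsecond, so the
 * wait is bounded by roughly adev->usec_timeout microseconds, typically on
 * the order of 100 ms.
 */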
698
699/**
700 * vi_asic_reset - soft reset GPU
701 *
702 * @adev: amdgpu_device pointer
703 *
 704 * Performs a full PCI config reset of the ASIC; VI does not
 705 * implement a finer-grained soft reset here.
706 * Returns 0 for success.
707 */
708static int vi_asic_reset(struct amdgpu_device *adev)
709{
710 int r;
711
712 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
713
714 r = vi_gpu_pci_config_reset(adev);
715
716 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
717
718 return r;
719}
720
721static enum amd_reset_method
722vi_asic_reset_method(struct amdgpu_device *adev)
723{
724 return AMD_RESET_METHOD_LEGACY;
725}
726
727static u32 vi_get_config_memsize(struct amdgpu_device *adev)
728{
729 return RREG32(mmCONFIG_MEMSIZE);
730}
731
732static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
733 u32 cntl_reg, u32 status_reg)
734{
735 int r, i;
736 struct atom_clock_dividers dividers;
737 uint32_t tmp;
738
739 r = amdgpu_atombios_get_clock_dividers(adev,
740 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
741 clock, false, &dividers);
742 if (r)
743 return r;
744
745 tmp = RREG32_SMC(cntl_reg);
746
747 if (adev->flags & AMD_IS_APU)
748 tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
749 else
750 tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
751 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
752 tmp |= dividers.post_divider;
753 WREG32_SMC(cntl_reg, tmp);
754
755 for (i = 0; i < 100; i++) {
756 tmp = RREG32_SMC(status_reg);
757 if (adev->flags & AMD_IS_APU) {
758 if (tmp & 0x10000)
759 break;
760 } else {
761 if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
762 break;
763 }
764 mdelay(10);
765 }
766 if (i == 100)
767 return -ETIMEDOUT;
768 return 0;
769}
770
771#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
772#define ixGNB_CLK1_STATUS 0xD822010C
773#define ixGNB_CLK2_DFS_CNTL 0xD8220110
774#define ixGNB_CLK2_STATUS 0xD822012C
775#define ixGNB_CLK3_DFS_CNTL 0xD8220130
776#define ixGNB_CLK3_STATUS 0xD822014C
777
778static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
779{
780 int r;
781
782 if (adev->flags & AMD_IS_APU) {
783 r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
784 if (r)
785 return r;
786
787 r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
788 if (r)
789 return r;
790 } else {
791 r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
792 if (r)
793 return r;
794
795 r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
796 if (r)
797 return r;
798 }
799
800 return 0;
801}
802
803static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
804{
805 int r, i;
806 struct atom_clock_dividers dividers;
807 u32 tmp;
808 u32 reg_ctrl;
809 u32 reg_status;
810 u32 status_mask;
811 u32 reg_mask;
812
813 if (adev->flags & AMD_IS_APU) {
814 reg_ctrl = ixGNB_CLK3_DFS_CNTL;
815 reg_status = ixGNB_CLK3_STATUS;
816 status_mask = 0x00010000;
817 reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
818 } else {
819 reg_ctrl = ixCG_ECLK_CNTL;
820 reg_status = ixCG_ECLK_STATUS;
821 status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
822 reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
823 }
824
825 r = amdgpu_atombios_get_clock_dividers(adev,
826 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
827 ecclk, false, &dividers);
828 if (r)
829 return r;
830
831 for (i = 0; i < 100; i++) {
832 if (RREG32_SMC(reg_status) & status_mask)
833 break;
834 mdelay(10);
835 }
836
837 if (i == 100)
838 return -ETIMEDOUT;
839
840 tmp = RREG32_SMC(reg_ctrl);
841 tmp &= ~reg_mask;
842 tmp |= dividers.post_divider;
843 WREG32_SMC(reg_ctrl, tmp);
844
845 for (i = 0; i < 100; i++) {
846 if (RREG32_SMC(reg_status) & status_mask)
847 break;
848 mdelay(10);
849 }
850
851 if (i == 100)
852 return -ETIMEDOUT;
853
854 return 0;
855}
856
857static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
858{
859 if (pci_is_root_bus(adev->pdev->bus))
860 return;
861
862 if (amdgpu_pcie_gen2 == 0)
863 return;
864
865 if (adev->flags & AMD_IS_APU)
866 return;
867
868 if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
869 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
870 return;
871
872 /* todo */
873}
874
875static void vi_program_aspm(struct amdgpu_device *adev)
876{
877
878 if (amdgpu_aspm == 0)
879 return;
880
881 /* todo */
882}
883
884static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
885 bool enable)
886{
887 u32 tmp;
888
889 /* not necessary on CZ */
890 if (adev->flags & AMD_IS_APU)
891 return;
892
893 tmp = RREG32(mmBIF_DOORBELL_APER_EN);
894 if (enable)
895 tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
896 else
897 tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);
898
899 WREG32(mmBIF_DOORBELL_APER_EN, tmp);
900}
901
902#define ATI_REV_ID_FUSE_MACRO__ADDRESS 0xC0014044
903#define ATI_REV_ID_FUSE_MACRO__SHIFT 9
904#define ATI_REV_ID_FUSE_MACRO__MASK 0x00001E00
905
906static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
907{
908 if (adev->flags & AMD_IS_APU)
909 return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
910 >> ATI_REV_ID_FUSE_MACRO__SHIFT;
911 else
912 return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
913 >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
914}
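/*
 * Worked example (informal): with ATI_REV_ID_FUSE_MACRO__MASK = 0x00001E00
 * and __SHIFT = 9, a fuse word of 0x00000400 yields
 * (0x400 & 0x1E00) >> 9 = 2, i.e. an APU rev_id of 2.
 */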
915
916static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
917{
918 if (!ring || !ring->funcs->emit_wreg) {
919 WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
920 RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
921 } else {
922 amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
923 }
924}
925
926static void vi_invalidate_hdp(struct amdgpu_device *adev,
927 struct amdgpu_ring *ring)
928{
929 if (!ring || !ring->funcs->emit_wreg) {
930 WREG32(mmHDP_DEBUG0, 1);
931 RREG32(mmHDP_DEBUG0);
932 } else {
933 amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
934 }
935}
936
937static bool vi_need_full_reset(struct amdgpu_device *adev)
938{
939 switch (adev->asic_type) {
940 case CHIP_CARRIZO:
941 case CHIP_STONEY:
942 /* CZ has hang issues with full reset at the moment */
943 return false;
944 case CHIP_FIJI:
945 case CHIP_TONGA:
946 /* XXX: soft reset should work on fiji and tonga */
947 return true;
948 case CHIP_POLARIS10:
949 case CHIP_POLARIS11:
950 case CHIP_POLARIS12:
951 case CHIP_TOPAZ:
952 default:
953 /* change this when we support soft reset */
954 return true;
955 }
956}
957
958static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
959 uint64_t *count1)
960{
961 uint32_t perfctr = 0;
962 uint64_t cnt0_of, cnt1_of;
963 int tmp;
964
965 /* This reports 0 on APUs, so return to avoid writing/reading registers
966 * that may or may not be different from their GPU counterparts
967 */
968 if (adev->flags & AMD_IS_APU)
969 return;
970
971 /* Set the 2 events that we wish to watch, defined above */
972 /* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
973 perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
974 perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
975
976 /* Write to enable desired perf counters */
977 WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
978 /* Zero out and enable the perf counters
979 * Write 0x5:
980 * Bit 0 = Start all counters(1)
981 * Bit 2 = Global counter reset enable(1)
982 */
983 WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);
984
985 msleep(1000);
986
987 /* Load the shadow and disable the perf counters
988 * Write 0x2:
989 * Bit 0 = Stop counters(0)
990 * Bit 1 = Load the shadow counters(1)
991 */
992 WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);
993
994 /* Read register values to get any >32bit overflow */
995 tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
996 cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
997 cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
998
999 /* Get the values and add the overflow */
1000 *count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
1001 *count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
1002}
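/*
 * Worked example (informal): if COUNTER0_UPPER reads back as 2 and
 * PCIE_PERF_COUNT0_TXCLK reads back as 0x10, the reported value is
 * (2ULL << 32) | 0x10 = 0x200000010, i.e. the 32-bit hardware counter
 * combined with its overflow bits in the upper half of the 64-bit result.
 */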
1003
1004static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
1005{
1006 uint64_t nak_r, nak_g;
1007
1008 /* Get the number of NAKs received and generated */
1009 nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
1010 nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);
1011
1012 /* Add the total number of NAKs, i.e the number of replays */
1013 return (nak_r + nak_g);
1014}
1015
1016static bool vi_need_reset_on_init(struct amdgpu_device *adev)
1017{
1018 u32 clock_cntl, pc;
1019
1020 if (adev->flags & AMD_IS_APU)
1021 return false;
1022
1023 /* check if the SMC is already running */
1024 clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
1025 pc = RREG32_SMC(ixSMC_PC_C);
1026 if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
1027 (0x20100 <= pc))
1028 return true;
1029
1030 return false;
1031}
1032
1033static const struct amdgpu_asic_funcs vi_asic_funcs =
1034{
1035 .read_disabled_bios = &vi_read_disabled_bios,
1036 .read_bios_from_rom = &vi_read_bios_from_rom,
1037 .read_register = &vi_read_register,
1038 .reset = &vi_asic_reset,
1039 .reset_method = &vi_asic_reset_method,
1040 .set_vga_state = &vi_vga_set_state,
1041 .get_xclk = &vi_get_xclk,
1042 .set_uvd_clocks = &vi_set_uvd_clocks,
1043 .set_vce_clocks = &vi_set_vce_clocks,
1044 .get_config_memsize = &vi_get_config_memsize,
1045 .flush_hdp = &vi_flush_hdp,
1046 .invalidate_hdp = &vi_invalidate_hdp,
1047 .need_full_reset = &vi_need_full_reset,
1048 .init_doorbell_index = &legacy_doorbell_index_init,
1049 .get_pcie_usage = &vi_get_pcie_usage,
1050 .need_reset_on_init = &vi_need_reset_on_init,
1051 .get_pcie_replay_count = &vi_get_pcie_replay_count,
1052};
1053
1054#define CZ_REV_BRISTOL(rev) \
1055 ((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))
1056
1057static int vi_common_early_init(void *handle)
1058{
1059 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1060
1061 if (adev->flags & AMD_IS_APU) {
1062 adev->smc_rreg = &cz_smc_rreg;
1063 adev->smc_wreg = &cz_smc_wreg;
1064 } else {
1065 adev->smc_rreg = &vi_smc_rreg;
1066 adev->smc_wreg = &vi_smc_wreg;
1067 }
1068 adev->pcie_rreg = &vi_pcie_rreg;
1069 adev->pcie_wreg = &vi_pcie_wreg;
1070 adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
1071 adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
1072 adev->didt_rreg = &vi_didt_rreg;
1073 adev->didt_wreg = &vi_didt_wreg;
1074 adev->gc_cac_rreg = &vi_gc_cac_rreg;
1075 adev->gc_cac_wreg = &vi_gc_cac_wreg;
1076
1077 adev->asic_funcs = &vi_asic_funcs;
1078
1079 adev->rev_id = vi_get_rev_id(adev);
1080 adev->external_rev_id = 0xFF;
1081 switch (adev->asic_type) {
1082 case CHIP_TOPAZ:
1083 adev->cg_flags = 0;
1084 adev->pg_flags = 0;
1085 adev->external_rev_id = 0x1;
1086 break;
1087 case CHIP_FIJI:
1088 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1089 AMD_CG_SUPPORT_GFX_MGLS |
1090 AMD_CG_SUPPORT_GFX_RLC_LS |
1091 AMD_CG_SUPPORT_GFX_CP_LS |
1092 AMD_CG_SUPPORT_GFX_CGTS |
1093 AMD_CG_SUPPORT_GFX_CGTS_LS |
1094 AMD_CG_SUPPORT_GFX_CGCG |
1095 AMD_CG_SUPPORT_GFX_CGLS |
1096 AMD_CG_SUPPORT_SDMA_MGCG |
1097 AMD_CG_SUPPORT_SDMA_LS |
1098 AMD_CG_SUPPORT_BIF_LS |
1099 AMD_CG_SUPPORT_HDP_MGCG |
1100 AMD_CG_SUPPORT_HDP_LS |
1101 AMD_CG_SUPPORT_ROM_MGCG |
1102 AMD_CG_SUPPORT_MC_MGCG |
1103 AMD_CG_SUPPORT_MC_LS |
1104 AMD_CG_SUPPORT_UVD_MGCG;
1105 adev->pg_flags = 0;
1106 adev->external_rev_id = adev->rev_id + 0x3c;
1107 break;
1108 case CHIP_TONGA:
1109 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1110 AMD_CG_SUPPORT_GFX_CGCG |
1111 AMD_CG_SUPPORT_GFX_CGLS |
1112 AMD_CG_SUPPORT_SDMA_MGCG |
1113 AMD_CG_SUPPORT_SDMA_LS |
1114 AMD_CG_SUPPORT_BIF_LS |
1115 AMD_CG_SUPPORT_HDP_MGCG |
1116 AMD_CG_SUPPORT_HDP_LS |
1117 AMD_CG_SUPPORT_ROM_MGCG |
1118 AMD_CG_SUPPORT_MC_MGCG |
1119 AMD_CG_SUPPORT_MC_LS |
1120 AMD_CG_SUPPORT_DRM_LS |
1121 AMD_CG_SUPPORT_UVD_MGCG;
1122 adev->pg_flags = 0;
1123 adev->external_rev_id = adev->rev_id + 0x14;
1124 break;
1125 case CHIP_POLARIS11:
1126 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1127 AMD_CG_SUPPORT_GFX_RLC_LS |
1128 AMD_CG_SUPPORT_GFX_CP_LS |
1129 AMD_CG_SUPPORT_GFX_CGCG |
1130 AMD_CG_SUPPORT_GFX_CGLS |
1131 AMD_CG_SUPPORT_GFX_3D_CGCG |
1132 AMD_CG_SUPPORT_GFX_3D_CGLS |
1133 AMD_CG_SUPPORT_SDMA_MGCG |
1134 AMD_CG_SUPPORT_SDMA_LS |
1135 AMD_CG_SUPPORT_BIF_MGCG |
1136 AMD_CG_SUPPORT_BIF_LS |
1137 AMD_CG_SUPPORT_HDP_MGCG |
1138 AMD_CG_SUPPORT_HDP_LS |
1139 AMD_CG_SUPPORT_ROM_MGCG |
1140 AMD_CG_SUPPORT_MC_MGCG |
1141 AMD_CG_SUPPORT_MC_LS |
1142 AMD_CG_SUPPORT_DRM_LS |
1143 AMD_CG_SUPPORT_UVD_MGCG |
1144 AMD_CG_SUPPORT_VCE_MGCG;
1145 adev->pg_flags = 0;
1146 adev->external_rev_id = adev->rev_id + 0x5A;
1147 break;
1148 case CHIP_POLARIS10:
1149 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1150 AMD_CG_SUPPORT_GFX_RLC_LS |
1151 AMD_CG_SUPPORT_GFX_CP_LS |
1152 AMD_CG_SUPPORT_GFX_CGCG |
1153 AMD_CG_SUPPORT_GFX_CGLS |
1154 AMD_CG_SUPPORT_GFX_3D_CGCG |
1155 AMD_CG_SUPPORT_GFX_3D_CGLS |
1156 AMD_CG_SUPPORT_SDMA_MGCG |
1157 AMD_CG_SUPPORT_SDMA_LS |
1158 AMD_CG_SUPPORT_BIF_MGCG |
1159 AMD_CG_SUPPORT_BIF_LS |
1160 AMD_CG_SUPPORT_HDP_MGCG |
1161 AMD_CG_SUPPORT_HDP_LS |
1162 AMD_CG_SUPPORT_ROM_MGCG |
1163 AMD_CG_SUPPORT_MC_MGCG |
1164 AMD_CG_SUPPORT_MC_LS |
1165 AMD_CG_SUPPORT_DRM_LS |
1166 AMD_CG_SUPPORT_UVD_MGCG |
1167 AMD_CG_SUPPORT_VCE_MGCG;
1168 adev->pg_flags = 0;
1169 adev->external_rev_id = adev->rev_id + 0x50;
1170 break;
1171 case CHIP_POLARIS12:
1172 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1173 AMD_CG_SUPPORT_GFX_RLC_LS |
1174 AMD_CG_SUPPORT_GFX_CP_LS |
1175 AMD_CG_SUPPORT_GFX_CGCG |
1176 AMD_CG_SUPPORT_GFX_CGLS |
1177 AMD_CG_SUPPORT_GFX_3D_CGCG |
1178 AMD_CG_SUPPORT_GFX_3D_CGLS |
1179 AMD_CG_SUPPORT_SDMA_MGCG |
1180 AMD_CG_SUPPORT_SDMA_LS |
1181 AMD_CG_SUPPORT_BIF_MGCG |
1182 AMD_CG_SUPPORT_BIF_LS |
1183 AMD_CG_SUPPORT_HDP_MGCG |
1184 AMD_CG_SUPPORT_HDP_LS |
1185 AMD_CG_SUPPORT_ROM_MGCG |
1186 AMD_CG_SUPPORT_MC_MGCG |
1187 AMD_CG_SUPPORT_MC_LS |
1188 AMD_CG_SUPPORT_DRM_LS |
1189 AMD_CG_SUPPORT_UVD_MGCG |
1190 AMD_CG_SUPPORT_VCE_MGCG;
1191 adev->pg_flags = 0;
1192 adev->external_rev_id = adev->rev_id + 0x64;
1193 break;
1194 case CHIP_VEGAM:
1195 adev->cg_flags = 0;
1196 /*AMD_CG_SUPPORT_GFX_MGCG |
1197 AMD_CG_SUPPORT_GFX_RLC_LS |
1198 AMD_CG_SUPPORT_GFX_CP_LS |
1199 AMD_CG_SUPPORT_GFX_CGCG |
1200 AMD_CG_SUPPORT_GFX_CGLS |
1201 AMD_CG_SUPPORT_GFX_3D_CGCG |
1202 AMD_CG_SUPPORT_GFX_3D_CGLS |
1203 AMD_CG_SUPPORT_SDMA_MGCG |
1204 AMD_CG_SUPPORT_SDMA_LS |
1205 AMD_CG_SUPPORT_BIF_MGCG |
1206 AMD_CG_SUPPORT_BIF_LS |
1207 AMD_CG_SUPPORT_HDP_MGCG |
1208 AMD_CG_SUPPORT_HDP_LS |
1209 AMD_CG_SUPPORT_ROM_MGCG |
1210 AMD_CG_SUPPORT_MC_MGCG |
1211 AMD_CG_SUPPORT_MC_LS |
1212 AMD_CG_SUPPORT_DRM_LS |
1213 AMD_CG_SUPPORT_UVD_MGCG |
1214 AMD_CG_SUPPORT_VCE_MGCG;*/
1215 adev->pg_flags = 0;
1216 adev->external_rev_id = adev->rev_id + 0x6E;
1217 break;
1218 case CHIP_CARRIZO:
1219 adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
1220 AMD_CG_SUPPORT_GFX_MGCG |
1221 AMD_CG_SUPPORT_GFX_MGLS |
1222 AMD_CG_SUPPORT_GFX_RLC_LS |
1223 AMD_CG_SUPPORT_GFX_CP_LS |
1224 AMD_CG_SUPPORT_GFX_CGTS |
1225 AMD_CG_SUPPORT_GFX_CGTS_LS |
1226 AMD_CG_SUPPORT_GFX_CGCG |
1227 AMD_CG_SUPPORT_GFX_CGLS |
1228 AMD_CG_SUPPORT_BIF_LS |
1229 AMD_CG_SUPPORT_HDP_MGCG |
1230 AMD_CG_SUPPORT_HDP_LS |
1231 AMD_CG_SUPPORT_SDMA_MGCG |
1232 AMD_CG_SUPPORT_SDMA_LS |
1233 AMD_CG_SUPPORT_VCE_MGCG;
1234 /* rev0 hardware requires workarounds to support PG */
1235 adev->pg_flags = 0;
1236 if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
1237 adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
1238 AMD_PG_SUPPORT_GFX_PIPELINE |
1239 AMD_PG_SUPPORT_CP |
1240 AMD_PG_SUPPORT_UVD |
1241 AMD_PG_SUPPORT_VCE;
1242 }
1243 adev->external_rev_id = adev->rev_id + 0x1;
1244 break;
1245 case CHIP_STONEY:
1246 adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
1247 AMD_CG_SUPPORT_GFX_MGCG |
1248 AMD_CG_SUPPORT_GFX_MGLS |
1249 AMD_CG_SUPPORT_GFX_RLC_LS |
1250 AMD_CG_SUPPORT_GFX_CP_LS |
1251 AMD_CG_SUPPORT_GFX_CGTS |
1252 AMD_CG_SUPPORT_GFX_CGTS_LS |
1253 AMD_CG_SUPPORT_GFX_CGLS |
1254 AMD_CG_SUPPORT_BIF_LS |
1255 AMD_CG_SUPPORT_HDP_MGCG |
1256 AMD_CG_SUPPORT_HDP_LS |
1257 AMD_CG_SUPPORT_SDMA_MGCG |
1258 AMD_CG_SUPPORT_SDMA_LS |
1259 AMD_CG_SUPPORT_VCE_MGCG;
1260 adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
1261 AMD_PG_SUPPORT_GFX_SMG |
1262 AMD_PG_SUPPORT_GFX_PIPELINE |
1263 AMD_PG_SUPPORT_CP |
1264 AMD_PG_SUPPORT_UVD |
1265 AMD_PG_SUPPORT_VCE;
1266 adev->external_rev_id = adev->rev_id + 0x61;
1267 break;
1268 default:
1269 /* FIXME: not supported yet */
1270 return -EINVAL;
1271 }
1272
1273 if (amdgpu_sriov_vf(adev)) {
1274 amdgpu_virt_init_setting(adev);
1275 xgpu_vi_mailbox_set_irq_funcs(adev);
1276 }
1277
1278 return 0;
1279}
1280
1281static int vi_common_late_init(void *handle)
1282{
1283 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1284
1285 if (amdgpu_sriov_vf(adev))
1286 xgpu_vi_mailbox_get_irq(adev);
1287
1288 return 0;
1289}
1290
1291static int vi_common_sw_init(void *handle)
1292{
1293 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1294
1295 if (amdgpu_sriov_vf(adev))
1296 xgpu_vi_mailbox_add_irq_id(adev);
1297
1298 return 0;
1299}
1300
1301static int vi_common_sw_fini(void *handle)
1302{
1303 return 0;
1304}
1305
1306static int vi_common_hw_init(void *handle)
1307{
1308 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1309
1310 /* move the golden regs per IP block */
1311 vi_init_golden_registers(adev);
1312 /* enable pcie gen2/3 link */
1313 vi_pcie_gen3_enable(adev);
1314 /* enable aspm */
1315 vi_program_aspm(adev);
1316 /* enable the doorbell aperture */
1317 vi_enable_doorbell_aperture(adev, true);
1318
1319 return 0;
1320}
1321
1322static int vi_common_hw_fini(void *handle)
1323{
1324 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1325
1326	/* disable the doorbell aperture */
1327 vi_enable_doorbell_aperture(adev, false);
1328
1329 if (amdgpu_sriov_vf(adev))
1330 xgpu_vi_mailbox_put_irq(adev);
1331
1332 return 0;
1333}
1334
1335static int vi_common_suspend(void *handle)
1336{
1337 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1338
1339 return vi_common_hw_fini(adev);
1340}
1341
1342static int vi_common_resume(void *handle)
1343{
1344 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1345
1346 return vi_common_hw_init(adev);
1347}
1348
1349static bool vi_common_is_idle(void *handle)
1350{
1351 return true;
1352}
1353
1354static int vi_common_wait_for_idle(void *handle)
1355{
1356 return 0;
1357}
1358
1359static int vi_common_soft_reset(void *handle)
1360{
1361 return 0;
1362}
1363
1364static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
1365 bool enable)
1366{
1367 uint32_t temp, data;
1368
1369 temp = data = RREG32_PCIE(ixPCIE_CNTL2);
1370
1371 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
1372 data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1373 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1374 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
1375 else
1376 data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1377 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1378 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
1379
1380 if (temp != data)
1381 WREG32_PCIE(ixPCIE_CNTL2, data);
1382}
1383
1384static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
1385 bool enable)
1386{
1387 uint32_t temp, data;
1388
1389 temp = data = RREG32(mmHDP_HOST_PATH_CNTL);
1390
1391 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
1392 data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1393 else
1394 data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1395
1396 if (temp != data)
1397 WREG32(mmHDP_HOST_PATH_CNTL, data);
1398}
1399
1400static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
1401 bool enable)
1402{
1403 uint32_t temp, data;
1404
1405 temp = data = RREG32(mmHDP_MEM_POWER_LS);
1406
1407 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1408 data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1409 else
1410 data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1411
1412 if (temp != data)
1413 WREG32(mmHDP_MEM_POWER_LS, data);
1414}
1415
1416static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
1417 bool enable)
1418{
1419 uint32_t temp, data;
1420
1421 temp = data = RREG32(0x157a);
1422
1423 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1424 data |= 1;
1425 else
1426 data &= ~1;
1427
1428 if (temp != data)
1429 WREG32(0x157a, data);
1430}
1431
1432
1433static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
1434 bool enable)
1435{
1436 uint32_t temp, data;
1437
1438 temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
1439
1440 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
1441 data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1442 CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
1443 else
1444 data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1445 CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
1446
1447 if (temp != data)
1448 WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
1449}
1450
1451static int vi_common_set_clockgating_state_by_smu(void *handle,
1452 enum amd_clockgating_state state)
1453{
1454 uint32_t msg_id, pp_state = 0;
1455 uint32_t pp_support_state = 0;
1456 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1457
1458 if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
1459 if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
1460 pp_support_state = PP_STATE_SUPPORT_LS;
1461 pp_state = PP_STATE_LS;
1462 }
1463 if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
1464 pp_support_state |= PP_STATE_SUPPORT_CG;
1465 pp_state |= PP_STATE_CG;
1466 }
1467 if (state == AMD_CG_STATE_UNGATE)
1468 pp_state = 0;
1469 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1470 PP_BLOCK_SYS_MC,
1471 pp_support_state,
1472 pp_state);
1473 if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1474 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1475 }
1476
1477 if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
1478 if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
1479 pp_support_state = PP_STATE_SUPPORT_LS;
1480 pp_state = PP_STATE_LS;
1481 }
1482 if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
1483 pp_support_state |= PP_STATE_SUPPORT_CG;
1484 pp_state |= PP_STATE_CG;
1485 }
1486 if (state == AMD_CG_STATE_UNGATE)
1487 pp_state = 0;
1488 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1489 PP_BLOCK_SYS_SDMA,
1490 pp_support_state,
1491 pp_state);
1492 if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1493 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1494 }
1495
1496 if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
1497 if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
1498 pp_support_state = PP_STATE_SUPPORT_LS;
1499 pp_state = PP_STATE_LS;
1500 }
1501 if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
1502 pp_support_state |= PP_STATE_SUPPORT_CG;
1503 pp_state |= PP_STATE_CG;
1504 }
1505 if (state == AMD_CG_STATE_UNGATE)
1506 pp_state = 0;
1507 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1508 PP_BLOCK_SYS_HDP,
1509 pp_support_state,
1510 pp_state);
1511 if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1512 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1513 }
1514
1515
1516 if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
1517 if (state == AMD_CG_STATE_UNGATE)
1518 pp_state = 0;
1519 else
1520 pp_state = PP_STATE_LS;
1521
1522 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1523 PP_BLOCK_SYS_BIF,
1524 PP_STATE_SUPPORT_LS,
1525 pp_state);
1526 if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1527 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1528 }
1529 if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
1530 if (state == AMD_CG_STATE_UNGATE)
1531 pp_state = 0;
1532 else
1533 pp_state = PP_STATE_CG;
1534
1535 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1536 PP_BLOCK_SYS_BIF,
1537 PP_STATE_SUPPORT_CG,
1538 pp_state);
1539 if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1540 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1541 }
1542
1543 if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
1544
1545 if (state == AMD_CG_STATE_UNGATE)
1546 pp_state = 0;
1547 else
1548 pp_state = PP_STATE_LS;
1549
1550 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1551 PP_BLOCK_SYS_DRM,
1552 PP_STATE_SUPPORT_LS,
1553 pp_state);
1554 if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1555 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1556 }
1557
1558 if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
1559
1560 if (state == AMD_CG_STATE_UNGATE)
1561 pp_state = 0;
1562 else
1563 pp_state = PP_STATE_CG;
1564
1565 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1566 PP_BLOCK_SYS_ROM,
1567 PP_STATE_SUPPORT_CG,
1568 pp_state);
1569 if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1570 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1571 }
1572 return 0;
1573}
1574
1575static int vi_common_set_clockgating_state(void *handle,
1576 enum amd_clockgating_state state)
1577{
1578 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1579
1580 if (amdgpu_sriov_vf(adev))
1581 return 0;
1582
1583 switch (adev->asic_type) {
1584 case CHIP_FIJI:
1585 vi_update_bif_medium_grain_light_sleep(adev,
1586 state == AMD_CG_STATE_GATE);
1587 vi_update_hdp_medium_grain_clock_gating(adev,
1588 state == AMD_CG_STATE_GATE);
1589 vi_update_hdp_light_sleep(adev,
1590 state == AMD_CG_STATE_GATE);
1591 vi_update_rom_medium_grain_clock_gating(adev,
1592 state == AMD_CG_STATE_GATE);
1593 break;
1594 case CHIP_CARRIZO:
1595 case CHIP_STONEY:
1596 vi_update_bif_medium_grain_light_sleep(adev,
1597 state == AMD_CG_STATE_GATE);
1598 vi_update_hdp_medium_grain_clock_gating(adev,
1599 state == AMD_CG_STATE_GATE);
1600 vi_update_hdp_light_sleep(adev,
1601 state == AMD_CG_STATE_GATE);
1602 vi_update_drm_light_sleep(adev,
1603 state == AMD_CG_STATE_GATE);
1604 break;
1605 case CHIP_TONGA:
1606 case CHIP_POLARIS10:
1607 case CHIP_POLARIS11:
1608 case CHIP_POLARIS12:
1609 case CHIP_VEGAM:
1610		vi_common_set_clockgating_state_by_smu(adev, state);
		break;
1611 default:
1612 break;
1613 }
1614 return 0;
1615}
1616
1617static int vi_common_set_powergating_state(void *handle,
1618 enum amd_powergating_state state)
1619{
1620 return 0;
1621}
1622
1623static void vi_common_get_clockgating_state(void *handle, u32 *flags)
1624{
1625 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1626 int data;
1627
1628 if (amdgpu_sriov_vf(adev))
1629 *flags = 0;
1630
1631 /* AMD_CG_SUPPORT_BIF_LS */
1632 data = RREG32_PCIE(ixPCIE_CNTL2);
1633 if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
1634 *flags |= AMD_CG_SUPPORT_BIF_LS;
1635
1636 /* AMD_CG_SUPPORT_HDP_LS */
1637 data = RREG32(mmHDP_MEM_POWER_LS);
1638 if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
1639 *flags |= AMD_CG_SUPPORT_HDP_LS;
1640
1641 /* AMD_CG_SUPPORT_HDP_MGCG */
1642 data = RREG32(mmHDP_HOST_PATH_CNTL);
1643 if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
1644 *flags |= AMD_CG_SUPPORT_HDP_MGCG;
1645
1646 /* AMD_CG_SUPPORT_ROM_MGCG */
1647 data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
1648 if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
1649 *flags |= AMD_CG_SUPPORT_ROM_MGCG;
1650}
1651
1652static const struct amd_ip_funcs vi_common_ip_funcs = {
1653 .name = "vi_common",
1654 .early_init = vi_common_early_init,
1655 .late_init = vi_common_late_init,
1656 .sw_init = vi_common_sw_init,
1657 .sw_fini = vi_common_sw_fini,
1658 .hw_init = vi_common_hw_init,
1659 .hw_fini = vi_common_hw_fini,
1660 .suspend = vi_common_suspend,
1661 .resume = vi_common_resume,
1662 .is_idle = vi_common_is_idle,
1663 .wait_for_idle = vi_common_wait_for_idle,
1664 .soft_reset = vi_common_soft_reset,
1665 .set_clockgating_state = vi_common_set_clockgating_state,
1666 .set_powergating_state = vi_common_set_powergating_state,
1667 .get_clockgating_state = vi_common_get_clockgating_state,
1668};
1669
1670static const struct amdgpu_ip_block_version vi_common_ip_block =
1671{
1672 .type = AMD_IP_BLOCK_TYPE_COMMON,
1673 .major = 1,
1674 .minor = 0,
1675 .rev = 0,
1676 .funcs = &vi_common_ip_funcs,
1677};
1678
1679int vi_set_ip_blocks(struct amdgpu_device *adev)
1680{
1681 /* in early init stage, vbios code won't work */
1682 vi_detect_hw_virtualization(adev);
1683
1684 if (amdgpu_sriov_vf(adev))
1685 adev->virt.ops = &xgpu_vi_virt_ops;
1686
1687 switch (adev->asic_type) {
1688 case CHIP_TOPAZ:
1689 /* topaz has no DCE, UVD, VCE */
1690 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1691 amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
1692 amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
1693 amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
1694 amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
1695 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1696 if (adev->enable_virtual_display)
1697 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1698 break;
1699 case CHIP_FIJI:
1700 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1701 amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
1702 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1703 amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
1704 amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
1705 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1706 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
1707 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1708#if defined(CONFIG_DRM_AMD_DC)
1709 else if (amdgpu_device_has_dc_support(adev))
1710 amdgpu_device_ip_block_add(adev, &dm_ip_block);
1711#endif
1712 else
1713 amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
1714 if (!amdgpu_sriov_vf(adev)) {
1715 amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
1716 amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
1717 }
1718 break;
1719 case CHIP_TONGA:
1720 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1721 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1722 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1723 amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
1724 amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
1725 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1726 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
1727 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1728#if defined(CONFIG_DRM_AMD_DC)
1729 else if (amdgpu_device_has_dc_support(adev))
1730 amdgpu_device_ip_block_add(adev, &dm_ip_block);
1731#endif
1732 else
1733 amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
1734 if (!amdgpu_sriov_vf(adev)) {
1735 amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
1736 amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
1737 }
1738 break;
1739 case CHIP_POLARIS10:
1740 case CHIP_POLARIS11:
1741 case CHIP_POLARIS12:
1742 case CHIP_VEGAM:
1743 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1744 amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
1745 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1746 amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
1747 amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
1748 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1749 if (adev->enable_virtual_display)
1750 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1751#if defined(CONFIG_DRM_AMD_DC)
1752 else if (amdgpu_device_has_dc_support(adev))
1753 amdgpu_device_ip_block_add(adev, &dm_ip_block);
1754#endif
1755 else
1756 amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
1757 amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
1758 amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
1759 break;
1760 case CHIP_CARRIZO:
1761 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1762 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1763 amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
1764 amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
1765 amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
1766 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1767 if (adev->enable_virtual_display)
1768 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1769#if defined(CONFIG_DRM_AMD_DC)
1770 else if (amdgpu_device_has_dc_support(adev))
1771 amdgpu_device_ip_block_add(adev, &dm_ip_block);
1772#endif
1773 else
1774 amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
1775 amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
1776 amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
1777#if defined(CONFIG_DRM_AMD_ACP)
1778 amdgpu_device_ip_block_add(adev, &acp_ip_block);
1779#endif
1780 break;
1781 case CHIP_STONEY:
1782 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1783 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1784 amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
1785 amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
1786 amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
1787 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1788 if (adev->enable_virtual_display)
1789 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1790#if defined(CONFIG_DRM_AMD_DC)
1791 else if (amdgpu_device_has_dc_support(adev))
1792 amdgpu_device_ip_block_add(adev, &dm_ip_block);
1793#endif
1794 else
1795 amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
1796 amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
1797 amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
1798#if defined(CONFIG_DRM_AMD_ACP)
1799 amdgpu_device_ip_block_add(adev, &acp_ip_block);
1800#endif
1801 break;
1802 default:
1803 /* FIXME: not supported yet */
1804 return -EINVAL;
1805 }
1806
1807 return 0;
1808}
1809
1810void legacy_doorbell_index_init(struct amdgpu_device *adev)
1811{
1812 adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
1813 adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
1814 adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
1815 adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
1816 adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
1817 adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
1818 adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
1819 adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
1820 adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
1821 adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
1822 adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
1823 adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
1824 adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
1825 adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
1826}