2 * Copyright 2012 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
22 * Authors: Alex Deucher
24 #include <linux/firmware.h>
25 #include <linux/platform_device.h>
26 #include <linux/slab.h>
27 #include <linux/module.h>
30 #include "radeon_asic.h"
33 #include "cik_blit_shaders.h"
36 #define CIK_PFP_UCODE_SIZE 2144
37 #define CIK_ME_UCODE_SIZE 2144
38 #define CIK_CE_UCODE_SIZE 2144
40 #define CIK_MEC_UCODE_SIZE 4192
42 #define BONAIRE_RLC_UCODE_SIZE 2048
43 #define KB_RLC_UCODE_SIZE 2560
44 #define KV_RLC_UCODE_SIZE 2560
46 #define CIK_MC_UCODE_SIZE 7866
48 MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
49 MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
50 MODULE_FIRMWARE("radeon/BONAIRE_ce.bin");
51 MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
52 MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
53 MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
54 MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
55 MODULE_FIRMWARE("radeon/KAVERI_me.bin");
56 MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
57 MODULE_FIRMWARE("radeon/KAVERI_mec.bin");
58 MODULE_FIRMWARE("radeon/KAVERI_rlc.bin");
59 MODULE_FIRMWARE("radeon/KABINI_pfp.bin");
60 MODULE_FIRMWARE("radeon/KABINI_me.bin");
61 MODULE_FIRMWARE("radeon/KABINI_ce.bin");
62 MODULE_FIRMWARE("radeon/KABINI_mec.bin");
63 MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
65 extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
66 extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
67 extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
69 #define BONAIRE_IO_MC_REGS_SIZE 36
/* Bonaire MC I/O init table: { MC_SEQ_IO_DEBUG index, value } pairs,
 * programmed through MC_SEQ_IO_DEBUG_INDEX/MC_SEQ_IO_DEBUG_DATA by
 * ci_mc_load_microcode() before the MC ucode itself is uploaded.
 * NOTE(review): the opening '{' and closing '};' of this initializer are
 * missing from this excerpt of the file — restore before building.
 */
static const u32 bonaire_io_mc_regs[BONAIRE_IO_MC_REGS_SIZE][2] =
	{0x00000070, 0x04400000},
	{0x00000071, 0x80c01803},
	{0x00000072, 0x00004004},
	{0x00000073, 0x00000100},
	{0x00000074, 0x00ff0000},
	{0x00000075, 0x34000000},
	{0x00000076, 0x08000014},
	{0x00000077, 0x00cc08ec},
	{0x00000078, 0x00000400},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x04090000},
	{0x0000007c, 0x00000000},
	{0x0000007e, 0x4408a8e8},
	{0x0000007f, 0x00000304},
	{0x00000080, 0x00000000},
	{0x00000082, 0x00000001},
	{0x00000083, 0x00000002},
	{0x00000084, 0xf3e4f400},
	{0x00000085, 0x052024e3},
	{0x00000087, 0x00000000},
	{0x00000088, 0x01000000},
	{0x0000008a, 0x1c0a0000},
	{0x0000008b, 0xff010000},
	{0x0000008d, 0xffffefff},
	{0x0000008e, 0xfff3efff},
	{0x0000008f, 0xfff3efbf},
	{0x00000092, 0xf7ffffff},
	{0x00000093, 0xffffff7f},
	{0x00000095, 0x00101101},
	{0x00000096, 0x00000fff},
	{0x00000097, 0x00116fff},
	{0x00000098, 0x60010000},
	{0x00000099, 0x10010000},
	{0x0000009a, 0x00006000},
	{0x0000009b, 0x00001000},
	{0x0000009f, 0x00b48000}
113 * ci_mc_load_microcode - load MC ucode into the hw
115 * @rdev: radeon_device pointer
117 * Load the GDDR MC ucode into the hw (CIK).
118 * Returns 0 on success, error on failure.
120 static int ci_mc_load_microcode(struct radeon_device *rdev)
122 const __be32 *fw_data;
123 u32 running, blackout = 0;
125 int i, ucode_size, regs_size;
130 switch (rdev->family) {
133 io_mc_regs = (u32 *)&bonaire_io_mc_regs;
134 ucode_size = CIK_MC_UCODE_SIZE;
135 regs_size = BONAIRE_IO_MC_REGS_SIZE;
139 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
143 blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
144 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
147 /* reset the engine and set to writable */
148 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
149 WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
151 /* load mc io regs */
152 for (i = 0; i < regs_size; i++) {
153 WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
154 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
156 /* load the MC ucode */
157 fw_data = (const __be32 *)rdev->mc_fw->data;
158 for (i = 0; i < ucode_size; i++)
159 WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
161 /* put the engine back into the active state */
162 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
163 WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
164 WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
166 /* wait for training to complete */
167 for (i = 0; i < rdev->usec_timeout; i++) {
168 if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
172 for (i = 0; i < rdev->usec_timeout; i++) {
173 if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
179 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
186 * cik_init_microcode - load ucode images from disk
188 * @rdev: radeon_device pointer
190 * Use the firmware interface to load the ucode images into
191 * the driver (not loaded into hw).
192 * Returns 0 on success, error on failure.
194 static int cik_init_microcode(struct radeon_device *rdev)
196 struct platform_device *pdev;
197 const char *chip_name;
198 size_t pfp_req_size, me_req_size, ce_req_size,
199 mec_req_size, rlc_req_size, mc_req_size;
205 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
208 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
212 switch (rdev->family) {
214 chip_name = "BONAIRE";
215 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
216 me_req_size = CIK_ME_UCODE_SIZE * 4;
217 ce_req_size = CIK_CE_UCODE_SIZE * 4;
218 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
219 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
220 mc_req_size = CIK_MC_UCODE_SIZE * 4;
223 chip_name = "KAVERI";
224 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
225 me_req_size = CIK_ME_UCODE_SIZE * 4;
226 ce_req_size = CIK_CE_UCODE_SIZE * 4;
227 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
228 rlc_req_size = KV_RLC_UCODE_SIZE * 4;
231 chip_name = "KABINI";
232 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
233 me_req_size = CIK_ME_UCODE_SIZE * 4;
234 ce_req_size = CIK_CE_UCODE_SIZE * 4;
235 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
236 rlc_req_size = KB_RLC_UCODE_SIZE * 4;
241 DRM_INFO("Loading %s Microcode\n", chip_name);
243 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
244 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
247 if (rdev->pfp_fw->size != pfp_req_size) {
249 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
250 rdev->pfp_fw->size, fw_name);
255 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
256 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
259 if (rdev->me_fw->size != me_req_size) {
261 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
262 rdev->me_fw->size, fw_name);
266 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
267 err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev);
270 if (rdev->ce_fw->size != ce_req_size) {
272 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
273 rdev->ce_fw->size, fw_name);
277 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
278 err = request_firmware(&rdev->mec_fw, fw_name, &pdev->dev);
281 if (rdev->mec_fw->size != mec_req_size) {
283 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
284 rdev->mec_fw->size, fw_name);
288 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
289 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
292 if (rdev->rlc_fw->size != rlc_req_size) {
294 "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
295 rdev->rlc_fw->size, fw_name);
299 /* No MC ucode on APUs */
300 if (!(rdev->flags & RADEON_IS_IGP)) {
301 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
302 err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
305 if (rdev->mc_fw->size != mc_req_size) {
307 "cik_mc: Bogus length %zu in firmware \"%s\"\n",
308 rdev->mc_fw->size, fw_name);
314 platform_device_unregister(pdev);
319 "cik_cp: Failed to load firmware \"%s\"\n",
321 release_firmware(rdev->pfp_fw);
323 release_firmware(rdev->me_fw);
325 release_firmware(rdev->ce_fw);
327 release_firmware(rdev->rlc_fw);
329 release_firmware(rdev->mc_fw);
/**
 * cik_tiling_mode_table_init - init the hw tiling table
 *
 * @rdev: radeon_device pointer
 *
 * Starting with SI, the tiling setup is done globally in a
 * set of 32 tiling modes. Rather than selecting each set of
 * parameters per surface as on older asics, we just select
 * which index in the tiling table we want to use, and the
 * surface uses those parameters (CIK).
 *
 * NOTE(review): this excerpt of the file has lost a number of
 * structural lines — the function's opening/closing braces, the
 * "case N:" / "default:" labels and "break;" statements inside the
 * switch blocks below, and several closing braces of the for loops.
 * The code is reproduced verbatim with comments only added; restore
 * the missing lines from the canonical source before building.
 */
static void cik_tiling_mode_table_init(struct radeon_device *rdev)
	const u32 num_tile_mode_states = 32;
	const u32 num_secondary_tile_mode_states = 16;
	u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
	u32 num_pipe_configs;
	/* total render backends = backends per SE * number of SEs */
	u32 num_rbs = rdev->config.cik.max_backends_per_se *
		rdev->config.cik.max_shader_engines;

	/* pick the TILE_SPLIT value matching the DRAM row size
	 * (case labels for 1/2/4 KB rows elided in this excerpt) */
	switch (rdev->config.cik.mem_row_size_in_kb) {
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;

	num_pipe_configs = rdev->config.cik.max_tile_pipes;
	if (num_pipe_configs > 8)
		num_pipe_configs = 8; /* ??? */

	/* GB_TILE_MODE table for 8-pipe configs (ADDR_SURF_P8_*) */
	if (num_pipe_configs == 8) {
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
				/* depth/stencil modes: fixed tile splits, then row-size split */
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 TILE_SPLIT(split_equal_to_row_size));
				/* linear aligned */
				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
				/* display modes */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				/* thin (color) modes */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				/* rotated modes */
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		/* GB_MACROTILE_MODE table for 8-pipe configs */
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_2_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_2_BANK));
			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
	/* GB_TILE_MODE table for 4-pipe configs (ADDR_SURF_P4_*); the
	 * nested "if (num_rbs == 4)" line is elided in this excerpt */
	} else if (num_pipe_configs == 4) {
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		/* variant used when fewer than 4 RBs: P4_8x16 pipe config */
		} else if (num_rbs < 4) {
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		/* GB_MACROTILE_MODE table shared by the 4-pipe variants */
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
	/* GB_TILE_MODE table for 2-pipe configs (ADDR_SURF_P2) */
	} else if (num_pipe_configs == 2) {
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(split_equal_to_row_size));
				/* no PIPE_CONFIG for linear aligned on P2 */
				gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED);
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		/* GB_MACROTILE_MODE table for 2-pipe configs */
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
	/* unsupported pipe config */
		DRM_ERROR("unknown num pipe config: 0x%x\n", num_pipe_configs);
1157 * cik_select_se_sh - select which SE, SH to address
1159 * @rdev: radeon_device pointer
1160 * @se_num: shader engine to address
1161 * @sh_num: sh block to address
1163 * Select which SE, SH combinations to address. Certain
1164 * registers are instanced per SE or SH. 0xffffffff means
1165 * broadcast to all SEs or SHs (CIK).
1167 static void cik_select_se_sh(struct radeon_device *rdev,
1168 u32 se_num, u32 sh_num)
/* Start from instance-broadcast; SE/SH bits are narrowed below. */
1170 u32 data = INSTANCE_BROADCAST_WRITES;
/* 0xffffffff in either argument means "broadcast to all" for that axis. */
1172 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
1173 data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
1174 else if (se_num == 0xffffffff)
1175 data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
1176 else if (sh_num == 0xffffffff)
1177 data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
/* NOTE(review): the final "else" line appears elided in this extraction. */
1179 data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
/* Latch the selection; subsequent instanced register accesses use it. */
1180 WREG32(GRBM_GFX_INDEX, data);
1184 * cik_create_bitmask - create a bitmask
1186 * @bit_width: length of the mask
1188 * create a variable length bit mask (CIK).
1189 * Returns the bitmask.
1191 static u32 cik_create_bitmask(u32 bit_width)
/* Build a mask with the low bit_width bits set, one bit per loop iteration.
 * NOTE(review): loop body/return lines appear elided in this extraction. */
1195 for (i = 0; i < bit_width; i++) {
1203 * cik_get_rb_disabled - compute the bitmask of disabled render backends
1205 * @rdev: radeon_device pointer
1206 * @max_rb_num: max RBs (render backends) for the asic
1207 * @se_num: number of SEs (shader engines) for the asic
1208 * @sh_per_se: number of SH blocks per SE for the asic
1210 * Calculates the bitmask of disabled RBs (CIK).
1211 * Returns the disabled RB bitmask.
1213 static u32 cik_get_rb_disabled(struct radeon_device *rdev,
1214 u32 max_rb_num, u32 se_num,
/* Hardware (fuse) disabled render backends. */
1219 data = RREG32(CC_RB_BACKEND_DISABLE);
1221 data &= BACKEND_DISABLE_MASK;
/* Fold in backends disabled by the user/driver. */
1224 data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
1226 data >>= BACKEND_DISABLE_SHIFT;
/* Mask covering the RBs that belong to a single SH. */
1228 mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se);
1234 * cik_setup_rb - setup the RBs on the asic
1236 * @rdev: radeon_device pointer
1237 * @se_num: number of SEs (shader engines) for the asic
1238 * @sh_per_se: number of SH blocks per SE for the asic
1239 * @max_rb_num: max RBs (render backends) for the asic
1241 * Configures per-SE/SH RB registers (CIK).
1243 static void cik_setup_rb(struct radeon_device *rdev,
1244 u32 se_num, u32 sh_per_se,
1249 u32 disabled_rbs = 0;
1250 u32 enabled_rbs = 0;
/* Gather the per-SE/SH disabled-RB bits into one global bitmask. */
1252 for (i = 0; i < se_num; i++) {
1253 for (j = 0; j < sh_per_se; j++) {
1254 cik_select_se_sh(rdev, i, j);
1255 data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
1256 disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
1259 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
/* Invert: enabled_rbs gets a bit for every RB not marked disabled. */
1262 for (i = 0; i < max_rb_num; i++) {
1263 if (!(disabled_rbs & mask))
1264 enabled_rbs |= mask;
/* Program PA_SC_RASTER_CONFIG per SE according to the enabled-RB pattern.
 * NOTE(review): case labels of this switch appear elided in this extraction. */
1268 for (i = 0; i < se_num; i++) {
1269 cik_select_se_sh(rdev, i, 0xffffffff);
1271 for (j = 0; j < sh_per_se; j++) {
1272 switch (enabled_rbs & 3) {
1274 data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
1277 data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
1281 data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
1286 WREG32(PA_SC_RASTER_CONFIG, data);
/* Restore broadcast addressing when done. */
1288 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
1292 * cik_gpu_init - setup the 3D engine
1294 * @rdev: radeon_device pointer
1296 * Configures the 3D engine and tiling configuration
1297 * registers so that the 3D engine is usable.
1299 static void cik_gpu_init(struct radeon_device *rdev)
1301 u32 gb_addr_config = RREG32(GB_ADDR_CONFIG);
1302 u32 mc_shared_chmap, mc_arb_ramcfg;
1303 u32 hdp_host_path_cntl;
/* Per-family configuration limits.
 * NOTE(review): the case labels of this switch appear elided in this
 * extraction; the first group sets 2-SE/4-pipe limits, the second 1-SE/2-pipe. */
1307 switch (rdev->family) {
1309 rdev->config.cik.max_shader_engines = 2;
1310 rdev->config.cik.max_tile_pipes = 4;
1311 rdev->config.cik.max_cu_per_sh = 7;
1312 rdev->config.cik.max_sh_per_se = 1;
1313 rdev->config.cik.max_backends_per_se = 2;
1314 rdev->config.cik.max_texture_channel_caches = 4;
1315 rdev->config.cik.max_gprs = 256;
1316 rdev->config.cik.max_gs_threads = 32;
1317 rdev->config.cik.max_hw_contexts = 8;
1319 rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
1320 rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
1321 rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
1322 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
1323 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
1330 rdev->config.cik.max_shader_engines = 1;
1331 rdev->config.cik.max_tile_pipes = 2;
1332 rdev->config.cik.max_cu_per_sh = 2;
1333 rdev->config.cik.max_sh_per_se = 1;
1334 rdev->config.cik.max_backends_per_se = 1;
1335 rdev->config.cik.max_texture_channel_caches = 2;
1336 rdev->config.cik.max_gprs = 256;
1337 rdev->config.cik.max_gs_threads = 16;
1338 rdev->config.cik.max_hw_contexts = 8;
1340 rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
1341 rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
1342 rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
1343 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
1344 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
1348 /* Initialize HDP */
1349 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1350 WREG32((0x2c14 + j), 0x00000000);
1351 WREG32((0x2c18 + j), 0x00000000);
1352 WREG32((0x2c1c + j), 0x00000000);
1353 WREG32((0x2c20 + j), 0x00000000);
1354 WREG32((0x2c24 + j), 0x00000000);
1357 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
/* Enable frame-buffer reads/writes through BIF. */
1359 WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
1361 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
1362 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
1364 rdev->config.cik.num_tile_pipes = rdev->config.cik.max_tile_pipes;
1365 rdev->config.cik.mem_max_burst_length_bytes = 256;
/* Derive DRAM row size (in KB) from the MC column count, clamped to 4. */
1366 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
1367 rdev->config.cik.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
1368 if (rdev->config.cik.mem_row_size_in_kb > 4)
1369 rdev->config.cik.mem_row_size_in_kb = 4;
1370 /* XXX use MC settings? */
1371 rdev->config.cik.shader_engine_tile_size = 32;
1372 rdev->config.cik.num_gpus = 1;
1373 rdev->config.cik.multi_gpu_tile_size = 64;
1375 /* fix up row size */
1376 gb_addr_config &= ~ROW_SIZE_MASK;
/* NOTE(review): the case labels (1/2/4 KB) appear elided in this extraction. */
1377 switch (rdev->config.cik.mem_row_size_in_kb) {
1380 gb_addr_config |= ROW_SIZE(0);
1383 gb_addr_config |= ROW_SIZE(1);
1386 gb_addr_config |= ROW_SIZE(2);
1390 /* setup tiling info dword. gb_addr_config is not adequate since it does
1391 * not have bank info, so create a custom tiling dword.
1392 * bits 3:0 num_pipes
1393 * bits 7:4 num_banks
1394 * bits 11:8 group_size
1395 * bits 15:12 row_size
1397 rdev->config.cik.tile_config = 0;
1398 switch (rdev->config.cik.num_tile_pipes) {
1400 rdev->config.cik.tile_config |= (0 << 0);
1403 rdev->config.cik.tile_config |= (1 << 0);
1406 rdev->config.cik.tile_config |= (2 << 0);
1410 /* XXX what about 12? */
1411 rdev->config.cik.tile_config |= (3 << 0);
/* bits 7:4 encode the bank count read from MC_ARB_RAMCFG. */
1414 if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
1415 rdev->config.cik.tile_config |= 1 << 4;
1417 rdev->config.cik.tile_config |= 0 << 4;
1418 rdev->config.cik.tile_config |=
1419 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
1420 rdev->config.cik.tile_config |=
1421 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
/* Broadcast the final address config to the blocks that consume it. */
1423 WREG32(GB_ADDR_CONFIG, gb_addr_config);
1424 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
1425 WREG32(DMIF_ADDR_CALC, gb_addr_config);
1427 cik_tiling_mode_table_init(rdev);
1429 cik_setup_rb(rdev, rdev->config.cik.max_shader_engines,
1430 rdev->config.cik.max_sh_per_se,
1431 rdev->config.cik.max_backends_per_se);
1433 /* set HW defaults for 3D engine */
1434 WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
1436 WREG32(SX_DEBUG_1, 0x20);
1438 WREG32(TA_CNTL_AUX, 0x00010000);
1440 tmp = RREG32(SPI_CONFIG_CNTL);
1442 WREG32(SPI_CONFIG_CNTL, tmp);
1444 WREG32(SQ_CONFIG, 1);
1446 WREG32(DB_DEBUG, 0);
1448 tmp = RREG32(DB_DEBUG2) & ~0xf00fffff;
1450 WREG32(DB_DEBUG2, tmp);
1452 tmp = RREG32(DB_DEBUG3) & ~0x0002021c;
1454 WREG32(DB_DEBUG3, tmp);
1456 tmp = RREG32(CB_HW_CONTROL) & ~0x00010000;
1458 WREG32(CB_HW_CONTROL, tmp);
1460 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
/* Program scan-converter FIFO sizes from the per-family config above. */
1462 WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_frontend) |
1463 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_backend) |
1464 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cik.sc_hiz_tile_fifo_size) |
1465 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cik.sc_earlyz_tile_fifo_size)));
1467 WREG32(VGT_NUM_INSTANCES, 1);
1469 WREG32(CP_PERFMON_CNTL, 0);
1471 WREG32(SQ_CONFIG, 0);
1473 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
1474 FORCE_EOV_MAX_REZ_CNT(255)));
1476 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
1477 AUTO_INVLD_EN(ES_AND_GS_AUTO));
1479 WREG32(VGT_GS_VERTEX_REUSE, 16);
1480 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
1482 tmp = RREG32(HDP_MISC_CNTL);
1483 tmp |= HDP_FLUSH_INVALIDATE_CACHE;
1484 WREG32(HDP_MISC_CNTL, tmp);
/* Read-modify-write with no bit changes visible here (read back then rewrite). */
1486 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
1487 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
1489 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
1490 WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);
1496 * GPU scratch registers helpers function.
1499 * cik_scratch_init - setup driver info for CP scratch regs
1501 * @rdev: radeon_device pointer
1503 * Set up the number and offset of the CP scratch registers.
1504 * NOTE: use of CP scratch registers is a legacy interface and
1505 * is not used by default on newer asics (r6xx+). On newer asics,
1506 * memory buffers are used for fences rather than scratch regs.
1508 static void cik_scratch_init(struct radeon_device *rdev)
/* Seven 32-bit scratch registers, 4 bytes apart, starting at SCRATCH_REG0;
 * each starts out marked free. */
1512 rdev->scratch.num_reg = 7;
1513 rdev->scratch.reg_base = SCRATCH_REG0;
1514 for (i = 0; i < rdev->scratch.num_reg; i++) {
1515 rdev->scratch.free[i] = true;
1516 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
1521 * cik_ring_test - basic gfx ring test
1523 * @rdev: radeon_device pointer
1524 * @ring: radeon_ring structure holding ring information
1526 * Allocate a scratch register and write to it using the gfx ring (CIK).
1527 * Provides a basic gfx ring test to verify that the ring is working.
1528 * Used by cik_cp_gfx_resume();
1529 * Returns 0 on success, error on failure.
1531 int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
/* Grab a scratch register to serve as the test target. */
1538 r = radeon_scratch_get(rdev, &scratch);
1540 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
/* Seed with a sentinel, then have the CP overwrite it via the ring. */
1543 WREG32(scratch, 0xCAFEDEAD);
1544 r = radeon_ring_lock(rdev, ring, 3);
1546 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
1547 radeon_scratch_free(rdev, scratch);
/* 3-dword packet: write 0xDEADBEEF to the scratch register via SET_UCONFIG_REG. */
1550 radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
1551 radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
1552 radeon_ring_write(ring, 0xDEADBEEF);
1553 radeon_ring_unlock_commit(rdev, ring);
/* Poll (up to usec_timeout iterations) for the CP to land the write. */
1554 for (i = 0; i < rdev->usec_timeout; i++) {
1555 tmp = RREG32(scratch);
1556 if (tmp == 0xDEADBEEF)
1560 if (i < rdev->usec_timeout) {
1561 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
1563 DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
1564 ring->idx, scratch, tmp);
1567 radeon_scratch_free(rdev, scratch);
1572 * cik_fence_ring_emit - emit a fence on the gfx ring
1574 * @rdev: radeon_device pointer
1575 * @fence: radeon fence object
1577 * Emits a fence sequence number on the gfx ring and flushes
1580 void cik_fence_ring_emit(struct radeon_device *rdev,
1581 struct radeon_fence *fence)
1583 struct radeon_ring *ring = &rdev->ring[fence->ring];
/* GPU address where the fence sequence number will be written. */
1584 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
1586 /* EVENT_WRITE_EOP - flush caches, send int */
1587 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
1588 radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
1590 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
/* Low address dword must be dword aligned; DATA_SEL(1)=write 32-bit data,
 * INT_SEL(2)=send interrupt after the write. */
1592 radeon_ring_write(ring, addr & 0xfffffffc);
1593 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2));
1594 radeon_ring_write(ring, fence->seq);
1595 radeon_ring_write(ring, 0);
1597 /* We should be using the new WAIT_REG_MEM special op packet here
1598 * but it causes the CP to hang
/* HDP flush via a WRITE_DATA to HDP_MEM_COHERENCY_FLUSH_CNTL. */
1600 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
1601 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
1602 WRITE_DATA_DST_SEL(0)));
1603 radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
1604 radeon_ring_write(ring, 0);
1605 radeon_ring_write(ring, 0);
1608 void cik_semaphore_ring_emit(struct radeon_device *rdev,
1609 struct radeon_ring *ring,
1610 struct radeon_semaphore *semaphore,
1613 uint64_t addr = semaphore->gpu_addr;
/* Select wait vs. signal semantics for the MEM_SEMAPHORE packet. */
1614 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
1616 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
1617 radeon_ring_write(ring, addr & 0xffffffff);
/* High 16 address bits share a dword with the wait/signal select. */
1618 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
1625 * cik_ring_ib_execute - emit an IB (Indirect Buffer) on the gfx ring
1627 * @rdev: radeon_device pointer
1628 * @ib: radeon indirect buffer object
1630 * Emits a DE (drawing engine) or CE (constant engine) IB
1631 * on the gfx ring. IBs are usually generated by userspace
1632 * acceleration drivers and submitted to the kernel for
1633 * scheduling on the ring. This function schedules the IB
1634 * on the gfx ring for execution by the GPU.
1636 void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1638 struct radeon_ring *ring = &rdev->ring[ib->ring];
1639 u32 header, control = INDIRECT_BUFFER_VALID;
1641 if (ib->is_const_ib) {
1642 /* set switch buffer packet before const IB */
1643 radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
1644 radeon_ring_write(ring, 0);
1646 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
/* For DE IBs, publish the post-IB rptr either through the rptr save
 * register or via a writeback (WRITE_DATA) when WB is enabled. */
1649 if (ring->rptr_save_reg) {
1650 next_rptr = ring->wptr + 3 + 4;
1651 radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
1652 radeon_ring_write(ring, ((ring->rptr_save_reg -
1653 PACKET3_SET_UCONFIG_REG_START) >> 2));
1654 radeon_ring_write(ring, next_rptr);
1655 } else if (rdev->wb.enabled) {
1656 next_rptr = ring->wptr + 5 + 4;
1657 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
1658 radeon_ring_write(ring, WRITE_DATA_DST_SEL(1));
1659 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
1660 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
1661 radeon_ring_write(ring, next_rptr);
1664 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
/* Length in dwords plus the VM id (bits 31:24) when a VM is attached. */
1667 control |= ib->length_dw |
1668 (ib->vm ? (ib->vm->id << 24) : 0);
1670 radeon_ring_write(ring, header);
1671 radeon_ring_write(ring,
1675 (ib->gpu_addr & 0xFFFFFFFC));
1676 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
1677 radeon_ring_write(ring, control);
1681 * cik_ib_test - basic gfx ring IB test
1683 * @rdev: radeon_device pointer
1684 * @ring: radeon_ring structure holding ring information
1686 * Allocate an IB and execute it on the gfx ring (CIK).
1687 * Provides a basic gfx ring test to verify that IBs are working.
1688 * Returns 0 on success, error on failure.
1690 int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
1692 struct radeon_ib ib;
/* Scratch register is the observable target for the IB's write. */
1698 r = radeon_scratch_get(rdev, &scratch);
1700 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
1703 WREG32(scratch, 0xCAFEDEAD);
1704 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
1706 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
/* The IB contains a single SET_UCONFIG_REG writing 0xDEADBEEF to scratch. */
1709 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
1710 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
1711 ib.ptr[2] = 0xDEADBEEF;
1713 r = radeon_ib_schedule(rdev, &ib, NULL);
1715 radeon_scratch_free(rdev, scratch);
1716 radeon_ib_free(rdev, &ib);
1717 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
/* Wait for the IB's fence, then poll the scratch register for the value. */
1720 r = radeon_fence_wait(ib.fence, false);
1722 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
1725 for (i = 0; i < rdev->usec_timeout; i++) {
1726 tmp = RREG32(scratch);
1727 if (tmp == 0xDEADBEEF)
1731 if (i < rdev->usec_timeout) {
1732 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
1734 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
1738 radeon_scratch_free(rdev, scratch);
1739 radeon_ib_free(rdev, &ib);
1745 * On CIK, gfx and compute now have independent command processors.
1748 * Gfx consists of a single ring and can process both gfx jobs and
1749 * compute jobs. The gfx CP consists of three microengines (ME):
1750 * PFP - Pre-Fetch Parser
1752 * CE - Constant Engine
1753 * The PFP and ME make up what is considered the Drawing Engine (DE).
1754 * The CE is an asynchronous engine used for updating buffer descriptors
1755 * used by the DE so that they can be loaded into cache in parallel
1756 * while the DE is processing state update packets.
1759 * The compute CP consists of two microengines (ME):
1760 * MEC1 - Compute MicroEngine 1
1761 * MEC2 - Compute MicroEngine 2
1762 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
1763 * The queues are exposed to userspace and are programmed directly
1764 * by the compute runtime.
1767 * cik_cp_gfx_enable - enable/disable the gfx CP MEs
1769 * @rdev: radeon_device pointer
1770 * @enable: enable or disable the MEs
1772 * Halts or unhalts the gfx MEs.
1774 static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
/* 0 clears all halt bits (run); otherwise halt PFP, ME and CE and mark
 * the gfx ring not ready. */
1777 WREG32(CP_ME_CNTL, 0);
1779 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
1780 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1786 * cik_cp_gfx_load_microcode - load the gfx CP ME ucode
1788 * @rdev: radeon_device pointer
1790 * Loads the gfx PFP, ME, and CE ucode.
1791 * Returns 0 for success, -EINVAL if the ucode is not available.
1793 static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
1795 const __be32 *fw_data;
/* All three firmwares must already be fetched by the probe path. */
1798 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
/* Halt the gfx MEs before touching their ucode RAM. */
1801 cik_cp_gfx_enable(rdev, false);
/* PFP: stream big-endian words into the ucode data port, addr reset to 0
 * before and after the transfer. */
1804 fw_data = (const __be32 *)rdev->pfp_fw->data;
1805 WREG32(CP_PFP_UCODE_ADDR, 0);
1806 for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
1807 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
1808 WREG32(CP_PFP_UCODE_ADDR, 0);
/* CE */
1811 fw_data = (const __be32 *)rdev->ce_fw->data;
1812 WREG32(CP_CE_UCODE_ADDR, 0);
1813 for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
1814 WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
1815 WREG32(CP_CE_UCODE_ADDR, 0);
/* ME */
1818 fw_data = (const __be32 *)rdev->me_fw->data;
1819 WREG32(CP_ME_RAM_WADDR, 0);
1820 for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
1821 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
1822 WREG32(CP_ME_RAM_WADDR, 0);
/* Reset all ucode address pointers for good measure. */
1824 WREG32(CP_PFP_UCODE_ADDR, 0);
1825 WREG32(CP_CE_UCODE_ADDR, 0);
1826 WREG32(CP_ME_RAM_WADDR, 0);
1827 WREG32(CP_ME_RAM_RADDR, 0);
1832 * cik_cp_gfx_start - start the gfx ring
1834 * @rdev: radeon_device pointer
1836 * Enables the ring and loads the clear state context and other
1837 * packets required to init the ring.
1838 * Returns 0 for success, error for failure.
1840 static int cik_cp_gfx_start(struct radeon_device *rdev)
1842 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
/* Basic CP setup: context count, endian swap off, device id. */
1846 WREG32(CP_MAX_CONTEXT, rdev->config.cik.max_hw_contexts - 1);
1847 WREG32(CP_ENDIAN_SWAP, 0);
1848 WREG32(CP_DEVICE_ID, 1);
1850 cik_cp_gfx_enable(rdev, true);
/* Reserve room for the default state plus 17 fixed dwords emitted below. */
1852 r = radeon_ring_lock(rdev, ring, cik_default_size + 17);
1854 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1858 /* init the CE partitions. CE only used for gfx on CIK */
1859 radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
1860 radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
1861 radeon_ring_write(ring, 0xc000);
1862 radeon_ring_write(ring, 0xc000);
1864 /* setup clear context state */
1865 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1866 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1868 radeon_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
1869 radeon_ring_write(ring, 0x80000000);
1870 radeon_ring_write(ring, 0x80000000);
/* Stream the golden default register state into the ring. */
1872 for (i = 0; i < cik_default_size; i++)
1873 radeon_ring_write(ring, cik_default_state[i]);
1875 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1876 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
1878 /* set clear context state */
1879 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
1880 radeon_ring_write(ring, 0);
1882 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
1883 radeon_ring_write(ring, 0x00000316);
1884 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1885 radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
1887 radeon_ring_unlock_commit(rdev, ring);
1893 * cik_cp_gfx_fini - stop the gfx ring
1895 * @rdev: radeon_device pointer
1897 * Stop the gfx ring and tear down the driver ring
1900 static void cik_cp_gfx_fini(struct radeon_device *rdev)
/* Halt the gfx MEs, then tear down the driver-side gfx ring. */
1902 cik_cp_gfx_enable(rdev, false);
1903 radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
1907 * cik_cp_gfx_resume - setup the gfx ring buffer registers
1909 * @rdev: radeon_device pointer
1911 * Program the location and size of the gfx ring buffer
1912 * and test it to make sure it's working.
1913 * Returns 0 for success, error for failure.
1915 static int cik_cp_gfx_resume(struct radeon_device *rdev)
1917 struct radeon_ring *ring;
/* Zero the CP semaphore timers. */
1923 WREG32(CP_SEM_WAIT_TIMER, 0x0);
1924 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
1926 /* Set the write pointer delay */
1927 WREG32(CP_RB_WPTR_DELAY, 0);
1929 /* set the RB to use vmid 0 */
1930 WREG32(CP_RB_VMID, 0);
1932 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
1934 /* ring 0 - compute and gfx */
1935 /* Set ring buffer size */
1936 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1937 rb_bufsz = drm_order(ring->ring_size / 8);
1938 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1940 tmp |= BUF_SWAP_32BIT;
1942 WREG32(CP_RB0_CNTL, tmp);
1944 /* Initialize the ring buffer's read and write pointers */
1945 WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
1947 WREG32(CP_RB0_WPTR, ring->wptr);
1949 /* set the wb address whether it's enabled or not */
1950 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
1951 WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
1953 /* scratch register shadowing is no longer supported */
1954 WREG32(SCRATCH_UMSK, 0);
1956 if (!rdev->wb.enabled)
1957 tmp |= RB_NO_UPDATE;
1960 WREG32(CP_RB0_CNTL, tmp)
1962 rb_addr = ring->gpu_addr >> 8;
1963 WREG32(CP_RB0_BASE, rb_addr);
1964 WREG32(CP_RB0_BASE_HI, upper_32_bits(rb_addr));
1966 ring->rptr = RREG32(CP_RB0_RPTR);
1968 /* start the ring */
1969 cik_cp_gfx_start(rdev);
1970 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
/* Sanity-check the ring; mark it not ready if the test fails. */
1971 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
1973 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1980 * cik_cp_compute_enable - enable/disable the compute CP MEs
1982 * @rdev: radeon_device pointer
1983 * @enable: enable or disable the MEs
1985 * Halts or unhalts the compute MEs.
1987 static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
/* 0 clears the halt bits (run); otherwise halt both compute MEs. */
1990 WREG32(CP_MEC_CNTL, 0);
1992 WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
1997 * cik_cp_compute_load_microcode - load the compute CP ME ucode
1999 * @rdev: radeon_device pointer
2001 * Loads the compute MEC1&2 ucode.
2002 * Returns 0 for success, -EINVAL if the ucode is not available.
2004 static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
2006 const __be32 *fw_data;
/* Halt the compute MEs before touching their ucode RAM. */
2012 cik_cp_compute_enable(rdev, false);
/* MEC1: stream big-endian words, addr reset to 0 before and after. */
2015 fw_data = (const __be32 *)rdev->mec_fw->data;
2016 WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
2017 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
2018 WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
2019 WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
/* Only KAVERI has a second MEC; it reuses the same mec firmware image. */
2021 if (rdev->family == CHIP_KAVERI) {
2023 fw_data = (const __be32 *)rdev->mec_fw->data;
2024 WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
2025 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
2026 WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
2027 WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
2034 * cik_cp_compute_start - start the compute queues
2036 * @rdev: radeon_device pointer
2038 * Enable the compute queues.
2039 * Returns 0 for success, error for failure.
2041 static int cik_cp_compute_start(struct radeon_device *rdev)
/* NOTE(review): the body appears elided in this extraction; per the
 * preceding kernel-doc this enables the compute queues and returns 0 on
 * success. */
2048 * cik_cp_compute_fini - stop the compute queues
2050 * @rdev: radeon_device pointer
2052 * Stop the compute queues and tear down the driver queue
2055 static void cik_cp_compute_fini(struct radeon_device *rdev)
/* Halt both compute MEs on teardown. */
2057 cik_cp_compute_enable(rdev, false);
2062 * cik_cp_compute_resume - setup the compute queue registers
2064 * @rdev: radeon_device pointer
2066 * Program the compute queues and test them to make sure they
2068 * Returns 0 for success, error for failure.
2070 static int cik_cp_compute_resume(struct radeon_device *rdev)
/* Delegate to the queue-start helper; its return code is the result. */
2075 r = cik_cp_compute_start(rdev);
2081 /* XXX temporary wrappers to handle both compute and gfx */
2083 static void cik_cp_enable(struct radeon_device *rdev, bool enable)
/* Apply the same enable/disable state to both gfx and compute CPs. */
2085 cik_cp_gfx_enable(rdev, enable);
2086 cik_cp_compute_enable(rdev, enable);
2090 static int cik_cp_load_microcode(struct radeon_device *rdev)
/* Load gfx ucode first, then compute ucode; each step is error-checked. */
2094 r = cik_cp_gfx_load_microcode(rdev);
2097 r = cik_cp_compute_load_microcode(rdev);
2105 static void cik_cp_fini(struct radeon_device *rdev)
/* Tear down both the gfx and compute command processors. */
2107 cik_cp_gfx_fini(rdev);
2108 cik_cp_compute_fini(rdev);
2112 static int cik_cp_resume(struct radeon_device *rdev)
2116 /* Reset all cp blocks */
/* Reads after writes flush the register posting before/after the reset. */
2117 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2118 RREG32(GRBM_SOFT_RESET);
2120 WREG32(GRBM_SOFT_RESET, 0);
2121 RREG32(GRBM_SOFT_RESET);
/* Reload ucode, then bring up the gfx ring and the compute queues. */
2123 r = cik_cp_load_microcode(rdev);
2127 r = cik_cp_gfx_resume(rdev);
2130 r = cik_cp_compute_resume(rdev);
2138 * cik_gpu_is_lockup - check if the 3D engine is locked up
2140 * @rdev: radeon_device pointer
2141 * @ring: radeon_ring structure holding ring information
2143 * Check if the 3D engine is locked up (CIK).
2144 * Returns true if the engine is locked, false if not.
2146 bool cik_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2148 u32 srbm_status, srbm_status2;
2149 u32 grbm_status, grbm_status2;
2150 u32 grbm_status_se0, grbm_status_se1, grbm_status_se2, grbm_status_se3;
/* Snapshot all engine status registers. */
2152 srbm_status = RREG32(SRBM_STATUS);
2153 srbm_status2 = RREG32(SRBM_STATUS2);
2154 grbm_status = RREG32(GRBM_STATUS);
2155 grbm_status2 = RREG32(GRBM_STATUS2);
2156 grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
2157 grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
2158 grbm_status_se2 = RREG32(GRBM_STATUS_SE2);
2159 grbm_status_se3 = RREG32(GRBM_STATUS_SE3);
/* GUI idle means the engine is not busy, hence not locked up. */
2160 if (!(grbm_status & GUI_ACTIVE)) {
2161 radeon_ring_lockup_update(ring);
2164 /* force CP activities */
2165 radeon_ring_force_activity(rdev, ring);
2166 return radeon_ring_test_lockup(rdev, ring);
2170 * cik_gfx_gpu_soft_reset - soft reset the 3D engine and CPG
2172 * @rdev: radeon_device pointer
2174 * Soft reset the GFX engine and CPG blocks (CIK).
2175 * XXX: deal with resetting RLC and CPF
2176 * Returns 0 for success.
2178 static int cik_gfx_gpu_soft_reset(struct radeon_device *rdev)
2180 struct evergreen_mc_save save;
/* Nothing to do if the gfx engine is already idle. */
2183 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
/* Dump engine status before the reset for debugging. */
2186 dev_info(rdev->dev, "GPU GFX softreset \n");
2187 dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
2188 RREG32(GRBM_STATUS));
2189 dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
2190 RREG32(GRBM_STATUS2));
2191 dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
2192 RREG32(GRBM_STATUS_SE0));
2193 dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
2194 RREG32(GRBM_STATUS_SE1));
2195 dev_info(rdev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
2196 RREG32(GRBM_STATUS_SE2));
2197 dev_info(rdev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
2198 RREG32(GRBM_STATUS_SE3));
2199 dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
2200 RREG32(SRBM_STATUS));
2201 dev_info(rdev->dev, "  SRBM_STATUS2=0x%08X\n",
2202 RREG32(SRBM_STATUS2));
/* Quiesce the memory controller across the reset. */
2203 evergreen_mc_stop(rdev, &save);
2204 if (radeon_mc_wait_for_idle(rdev)) {
2205 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
2207 /* Disable CP parsing/prefetching */
2208 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
2210 /* reset all the gfx block and all CPG blocks */
2211 grbm_reset = SOFT_RESET_CPG | SOFT_RESET_GFX;
2213 dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
/* Assert and deassert the reset; reads flush the posted writes. */
2214 WREG32(GRBM_SOFT_RESET, grbm_reset);
2215 (void)RREG32(GRBM_SOFT_RESET);
2217 WREG32(GRBM_SOFT_RESET, 0);
2218 (void)RREG32(GRBM_SOFT_RESET);
2219 /* Wait a little for things to settle down */
/* Dump status again after the reset. */
2221 dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
2222 RREG32(GRBM_STATUS));
2223 dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
2224 RREG32(GRBM_STATUS2));
2225 dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
2226 RREG32(GRBM_STATUS_SE0));
2227 dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
2228 RREG32(GRBM_STATUS_SE1));
2229 dev_info(rdev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
2230 RREG32(GRBM_STATUS_SE2));
2231 dev_info(rdev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
2232 RREG32(GRBM_STATUS_SE3));
2233 dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
2234 RREG32(SRBM_STATUS));
2235 dev_info(rdev->dev, "  SRBM_STATUS2=0x%08X\n",
2236 RREG32(SRBM_STATUS2));
2237 evergreen_mc_resume(rdev, &save);
2242 * cik_compute_gpu_soft_reset - soft reset CPC
2244 * @rdev: radeon_device pointer
2246 * Soft reset the CPC blocks (CIK).
2247 * XXX: deal with resetting RLC and CPF
2248 * Returns 0 for success.
2250 static int cik_compute_gpu_soft_reset(struct radeon_device *rdev)
2252 struct evergreen_mc_save save;
/* Dump engine status before the reset for debugging. */
2255 dev_info(rdev->dev, "GPU compute softreset \n");
2256 dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
2257 RREG32(GRBM_STATUS));
2258 dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
2259 RREG32(GRBM_STATUS2));
2260 dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
2261 RREG32(GRBM_STATUS_SE0));
2262 dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
2263 RREG32(GRBM_STATUS_SE1));
2264 dev_info(rdev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
2265 RREG32(GRBM_STATUS_SE2));
2266 dev_info(rdev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
2267 RREG32(GRBM_STATUS_SE3));
2268 dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
2269 RREG32(SRBM_STATUS));
2270 dev_info(rdev->dev, "  SRBM_STATUS2=0x%08X\n",
2271 RREG32(SRBM_STATUS2));
/* Quiesce the memory controller across the reset. */
2272 evergreen_mc_stop(rdev, &save);
2273 if (radeon_mc_wait_for_idle(rdev)) {
2274 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
2276 /* Disable CP parsing/prefetching */
2277 WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);
2279 /* reset all the CPC blocks */
/* NOTE(review): resets SOFT_RESET_CPG here despite the "CPC" comment —
 * confirm against the register spec whether SOFT_RESET_CPC was intended. */
2280 grbm_reset = SOFT_RESET_CPG;
2282 dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
/* Assert and deassert the reset; reads flush the posted writes. */
2283 WREG32(GRBM_SOFT_RESET, grbm_reset);
2284 (void)RREG32(GRBM_SOFT_RESET);
2286 WREG32(GRBM_SOFT_RESET, 0);
2287 (void)RREG32(GRBM_SOFT_RESET);
2288 /* Wait a little for things to settle down */
/* Dump status again after the reset. */
2290 dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
2291 RREG32(GRBM_STATUS));
2292 dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
2293 RREG32(GRBM_STATUS2));
2294 dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
2295 RREG32(GRBM_STATUS_SE0));
2296 dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
2297 RREG32(GRBM_STATUS_SE1));
2298 dev_info(rdev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
2299 RREG32(GRBM_STATUS_SE2));
2300 dev_info(rdev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
2301 RREG32(GRBM_STATUS_SE3));
2302 dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
2303 RREG32(SRBM_STATUS));
2304 dev_info(rdev->dev, "  SRBM_STATUS2=0x%08X\n",
2305 RREG32(SRBM_STATUS2));
2306 evergreen_mc_resume(rdev, &save);
2311 * cik_asic_reset - soft reset compute and gfx
2313 * @rdev: radeon_device pointer
2315 * Soft reset the compute (CPC) and gfx blocks (CIK).
2316 * XXX: make this more fine grained and only reset
2317 * what is necessary.
2318 * Returns 0 for success.
2320 int cik_asic_reset(struct radeon_device *rdev)
/* reset the compute (CPC) block first; on failure just log and still
 * attempt the gfx reset below */
2324 	r = cik_compute_gpu_soft_reset(rdev);
2326 		dev_info(rdev->dev, "Compute reset failed!\n");
/* the gfx soft reset result is what callers see */
2328 	return cik_gfx_gpu_soft_reset(rdev);
2333 * cik_mc_program - program the GPU memory controller
2335 * @rdev: radeon_device pointer
2337 * Set the location of vram, gart, and AGP in the GPU's
2338 * physical address space (CIK).
2340 static void cik_mc_program(struct radeon_device *rdev)
2342 	struct evergreen_mc_save save;
2346 	/* Initialize HDP */
/* zero five registers per entry for 32 entries, stride 0x18 —
 * presumably the HDP address config blocks; verify against register spec */
2347 	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
2348 		WREG32((0x2c14 + j), 0x00000000);
2349 		WREG32((0x2c18 + j), 0x00000000);
2350 		WREG32((0x2c1c + j), 0x00000000);
2351 		WREG32((0x2c20 + j), 0x00000000);
2352 		WREG32((0x2c24 + j), 0x00000000);
2354 	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
/* stop the MC before reprogramming the apertures */
2356 	evergreen_mc_stop(rdev, &save);
2357 	if (radeon_mc_wait_for_idle(rdev)) {
2358 		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
2360 	/* Lockout access through VGA aperture*/
2361 	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
2362 	/* Update configuration */
/* system aperture covers VRAM; faults land on the scratch page */
2363 	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2364 		rdev->mc.vram_start >> 12);
2365 	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2366 		rdev->mc.vram_end >> 12);
2367 	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
2368 		rdev->vram_scratch.gpu_addr >> 12);
/* MC_VM_FB_LOCATION packs end (bits 31:16) and start (bits 15:0),
 * both in 16MB units (>> 24) */
2369 	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
2370 	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
2371 	WREG32(MC_VM_FB_LOCATION, tmp);
2372 	/* XXX double check these! */
2373 	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
2374 	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
2375 	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
/* AGP aperture left disabled — presumably TOP == BOT yields an empty
 * range; verify */
2376 	WREG32(MC_VM_AGP_BASE, 0);
2377 	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
2378 	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
2379 	if (radeon_mc_wait_for_idle(rdev)) {
2380 		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
2382 	evergreen_mc_resume(rdev, &save);
2383 	/* we need to own VRAM, so turn off the VGA renderer here
2384 	 * to stop it overwriting our objects */
2385 	rv515_vga_render_disable(rdev);
2389 * cik_mc_init - initialize the memory controller driver params
2391 * @rdev: radeon_device pointer
2393 * Look up the amount of vram, vram width, and decide how to place
2394 * vram and gart within the GPU's physical address space (CIK).
2395 * Returns 0 for success.
2397 static int cik_mc_init(struct radeon_device *rdev)
2400 	int chansize, numchan;
2402 	/* Get VRAM information: channel size from MC_ARB_RAMCFG,
	 * channel count from MC_SHARED_CHMAP */
2403 	rdev->mc.vram_is_ddr = true;
2404 	tmp = RREG32(MC_ARB_RAMCFG);
2405 	if (tmp & CHANSIZE_MASK) {
2410 	tmp = RREG32(MC_SHARED_CHMAP);
2411 	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
/* total bus width = channels * bits per channel */
2441 	rdev->mc.vram_width = numchan * chansize;
2442 	/* Could aper size report 0 ? */
2443 	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
2444 	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
2445 	/* CONFIG_MEMSIZE reports VRAM size in MB, hence the * 1024 * 1024 */
2446 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2447 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
/* only the PCI BAR 0 window of VRAM is CPU-visible */
2448 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
/* CIK reuses the SI vram/gtt placement logic */
2449 	si_vram_gtt_location(rdev, &rdev->mc);
2450 	radeon_update_bandwidth_info(rdev);
2457 * VMID 0 is the physical GPU addresses as used by the kernel.
2458 * VMIDs 1-15 are used for userspace clients and are handled
2459 * by the radeon vm/hsa code.
2462 * cik_pcie_gart_tlb_flush - gart tlb flush callback
2464 * @rdev: radeon_device pointer
2466 * Flush the TLB for the VMID 0 page table (CIK).
2468 void cik_pcie_gart_tlb_flush(struct radeon_device *rdev)
2470 	/* flush hdp cache */
2471 	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
2473 	/* bits 0-15 are the VM contexts0-15 */
/* only bit 0 is set: invalidate VM context 0 (the kernel GART table) */
2474 	WREG32(VM_INVALIDATE_REQUEST, 0x1);
2478 * cik_pcie_gart_enable - gart enable
2480 * @rdev: radeon_device pointer
2482 * This sets up the TLBs, programs the page tables for VMID0,
2483 * sets up the hw for VMIDs 1-15 which are allocated on
2484 * demand, and sets up the global locations for the LDS, GDS,
2485 * and GPUVM for FSA64 clients (CIK).
2486 * Returns 0 for success, errors for failure.
2488 static int cik_pcie_gart_enable(struct radeon_device *rdev)
/* the GART page table must already have a backing VRAM BO */
2492 	if (rdev->gart.robj == NULL) {
2493 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
2496 	r = radeon_gart_table_vram_pin(rdev);
/* re-write any existing GART entries into the freshly pinned table */
2499 	radeon_gart_restore(rdev);
2500 	/* Setup TLB control */
2501 	WREG32(MC_VM_MX_L1_TLB_CNTL,
2504 	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
2505 	       ENABLE_ADVANCED_DRIVER_MODEL |
2506 	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
2507 	/* Setup L2 cache */
2508 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
2509 	       ENABLE_L2_FRAGMENT_PROCESSING |
2510 	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
2511 	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
2512 	       EFFECTIVE_L2_QUEUE_SIZE(7) |
2513 	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
2514 	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
2515 	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
2516 	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
2517 	/* setup context0: the kernel's GART mapping, flat (depth 0)
	 * page table, faults redirected to the dummy page */
2518 	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
2519 	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
2520 	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
2521 	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
2522 			(u32)(rdev->dummy_page.addr >> 12));
2523 	WREG32(VM_CONTEXT0_CNTL2, 0);
2524 	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
2525 				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
2531 	/* empty context1-15 */
2532 	/* FIXME start with 4G, once using 2 level pt switch to full
2535 	/* set vm size, must be a multiple of 4 */
2536 	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
2537 	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
/* contexts 1-7 index off VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, 8-15 off
 * VM_CONTEXT8_PAGE_TABLE_BASE_ADDR — two separate register banks */
2538 	for (i = 1; i < 16; i++) {
2540 			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
2541 			       rdev->gart.table_addr >> 12);
2543 			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
2544 			       rdev->gart.table_addr >> 12);
2547 	/* enable context1-15 */
2548 	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
2549 	       (u32)(rdev->dummy_page.addr >> 12));
2550 	WREG32(VM_CONTEXT1_CNTL2, 4);
/* two-level page tables (depth 1) with every fault type raising an
 * interrupt and falling back to the default (dummy) page */
2551 	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
2552 				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2553 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
2554 				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2555 				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
2556 				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
2557 				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
2558 				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
2559 				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
2560 				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
2561 				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
2562 				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2563 				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
2565 	/* TC cache setup ??? */
2566 	WREG32(TC_CFG_L1_LOAD_POLICY0, 0);
2567 	WREG32(TC_CFG_L1_LOAD_POLICY1, 0);
2568 	WREG32(TC_CFG_L1_STORE_POLICY, 0);
2570 	WREG32(TC_CFG_L2_LOAD_POLICY0, 0);
2571 	WREG32(TC_CFG_L2_LOAD_POLICY1, 0);
2572 	WREG32(TC_CFG_L2_STORE_POLICY0, 0);
2573 	WREG32(TC_CFG_L2_STORE_POLICY1, 0);
2574 	WREG32(TC_CFG_L2_ATOMIC_POLICY, 0);
2576 	WREG32(TC_CFG_L1_VOLATILE, 0);
2577 	WREG32(TC_CFG_L2_VOLATILE, 0);
/* Kaveri-only CHUB read/modify/write */
2579 	if (rdev->family == CHIP_KAVERI) {
2580 		u32 tmp = RREG32(CHUB_CONTROL);
2582 		WREG32(CHUB_CONTROL, tmp);
2585 	/* XXX SH_MEM regs */
2586 	/* where to put LDS, scratch, GPUVM in FSA64 space */
/* program the SH_MEM_* defaults for each VMID by selecting it through
 * SRBM_GFX_CNTL, then deselect (VMID 0) when done */
2587 	for (i = 0; i < 16; i++) {
2588 		WREG32(SRBM_GFX_CNTL, VMID(i));
2589 		WREG32(SH_MEM_CONFIG, 0);
/* APE1 base(1) > limit(0) — presumably leaves APE1 disabled; verify */
2590 		WREG32(SH_MEM_APE1_BASE, 1);
2591 		WREG32(SH_MEM_APE1_LIMIT, 0);
2592 		WREG32(SH_MEM_BASES, 0);
2594 	WREG32(SRBM_GFX_CNTL, 0);
2596 	cik_pcie_gart_tlb_flush(rdev);
2597 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
2598 		 (unsigned)(rdev->mc.gtt_size >> 20),
2599 		 (unsigned long long)rdev->gart.table_addr);
2600 	rdev->gart.ready = true;
2605 * cik_pcie_gart_disable - gart disable
2607 * @rdev: radeon_device pointer
2609 * This disables all VM page tables (CIK).
2611 static void cik_pcie_gart_disable(struct radeon_device *rdev)
2613 	/* Disable all tables */
2614 	WREG32(VM_CONTEXT0_CNTL, 0);
2615 	WREG32(VM_CONTEXT1_CNTL, 0);
2616 	/* Setup TLB control: keep unmapped-access pass-through so stray
	 * accesses complete instead of hanging — presumably; verify */
2617 	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
2618 	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
2619 	/* Setup L2 cache */
2621 	       ENABLE_L2_FRAGMENT_PROCESSING |
2622 	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
2623 	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
2624 	       EFFECTIVE_L2_QUEUE_SIZE(7) |
2625 	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
2626 	WREG32(VM_L2_CNTL2, 0);
2627 	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
2628 	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
/* release the pinned page-table BO; pair of the pin in gart_enable */
2629 	radeon_gart_table_vram_unpin(rdev);
2633 * cik_pcie_gart_fini - vm fini callback
2635 * @rdev: radeon_device pointer
2637 * Tears down the driver GART/VM setup (CIK).
2639 static void cik_pcie_gart_fini(struct radeon_device *rdev)
/* turn off the VM hardware first */
2641 	cik_pcie_gart_disable(rdev);
/* free the page-table BO, then the common gart bookkeeping */
2642 	radeon_gart_table_vram_free(rdev);
2643 	radeon_gart_fini(rdev);
2648 * cik_ib_parse - vm ib_parse callback
2650 * @rdev: radeon_device pointer
2651 * @ib: indirect buffer pointer
2653 * CIK uses hw IB checking so this is a nop (CIK).
2655 int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
2662 * VMID 0 is the physical GPU addresses as used by the kernel.
2663 * VMIDs 1-15 are used for userspace clients and are handled
2664 * by the radeon vm/hsa code.
2667 * cik_vm_init - cik vm init callback
2669 * @rdev: radeon_device pointer
2671 * Inits cik specific vm parameters (number of VMs, base of vram for
2672 * VMIDs 1-15) (CIK).
2673 * Returns 0 for success.
2675 int cik_vm_init(struct radeon_device *rdev)
/* 16 VMIDs total: 0 for the kernel, 1-15 for userspace clients */
2678 	rdev->vm_manager.nvm = 16;
2679 	/* base offset of vram pages */
2680 	if (rdev->flags & RADEON_IS_IGP) {
/* NOTE(review): MC_VM_FB_OFFSET raw value stored directly as the byte
 * offset — confirm the register's units (a shift may be missing from
 * this excerpt) */
2681 		u64 tmp = RREG32(MC_VM_FB_OFFSET);
2683 		rdev->vm_manager.vram_base_offset = tmp;
/* discrete cards: VRAM starts at offset 0 */
2685 		rdev->vm_manager.vram_base_offset = 0;
2691 * cik_vm_fini - cik vm fini callback
2693 * @rdev: radeon_device pointer
2695 * Tear down any asic specific VM setup (CIK).
2697 void cik_vm_fini(struct radeon_device *rdev)
2702 * cik_vm_flush - cik vm flush using the CP
2704 * @rdev: radeon_device pointer
2706 * Update the page table base and flush the VM TLB
2707 * using the CP (CIK).
2709 void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
2711 	struct radeon_ring *ring = &rdev->ring[ridx];
/* point this VM's page-table base at the new directory; VMIDs 0-7 and
 * 8-15 live in separate register banks */
2716 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2717 	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2718 				 WRITE_DATA_DST_SEL(0)));
2720 		radeon_ring_write(ring,
2721 				  (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
2723 		radeon_ring_write(ring,
2724 				  (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
2726 	radeon_ring_write(ring, 0);
2727 	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
2729 	/* update SH_MEM_* regs */
/* select this VM's VMID via SRBM_GFX_CNTL so the SH_MEM_* writes below
 * land in the right per-VMID bank */
2730 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2731 	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2732 				 WRITE_DATA_DST_SEL(0)));
2733 	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
2734 	radeon_ring_write(ring, 0);
2735 	radeon_ring_write(ring, VMID(vm->id));
/* four consecutive registers written in one packet starting at
 * SH_MEM_BASES — same defaults as cik_pcie_gart_enable() */
2737 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
2738 	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2739 				 WRITE_DATA_DST_SEL(0)));
2740 	radeon_ring_write(ring, SH_MEM_BASES >> 2);
2741 	radeon_ring_write(ring, 0);
2743 	radeon_ring_write(ring, 0); /* SH_MEM_BASES */
2744 	radeon_ring_write(ring, 0); /* SH_MEM_CONFIG */
2745 	radeon_ring_write(ring, 1); /* SH_MEM_APE1_BASE */
2746 	radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */
/* switch SRBM back to VMID 0 */
2748 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2749 	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2750 				 WRITE_DATA_DST_SEL(0)));
2751 	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
2752 	radeon_ring_write(ring, 0);
2753 	radeon_ring_write(ring, VMID(0));
2756 	/* We should be using the WAIT_REG_MEM packet here like in
2757 	 * cik_fence_ring_emit(), but it causes the CP to hang in this
/* flush the HDP cache from the ring via a register write packet */
2760 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2761 	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2762 				 WRITE_DATA_DST_SEL(0)));
2763 	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
2764 	radeon_ring_write(ring, 0);
2765 	radeon_ring_write(ring, 0);
2767 	/* bits 0-15 are the VM contexts0-15 */
/* invalidate only this VM's TLB context */
2768 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2769 	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2770 				 WRITE_DATA_DST_SEL(0)));
2771 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
2772 	radeon_ring_write(ring, 0);
2773 	radeon_ring_write(ring, 1 << vm->id);
2775 	/* sync PFP to ME, otherwise we might get invalid PFP reads */
2776 	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2777 	radeon_ring_write(ring, 0x0);