/* drivers/gpu/drm/radeon/ni.c */
1 /*
2  * Copyright 2010 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Alex Deucher
23  */
24 #include <linux/firmware.h>
25 #include <linux/slab.h>
26 #include <linux/module.h>
27 #include <drm/drmP.h>
28 #include "radeon.h"
29 #include "radeon_asic.h"
30 #include <drm/radeon_drm.h>
31 #include "nid.h"
32 #include "atom.h"
33 #include "ni_reg.h"
34 #include "cayman_blit_shaders.h"
35 #include "radeon_ucode.h"
36 #include "clearstate_cayman.h"
37
/*
 * GFX register offsets to be saved/restored around RLC power gating on
 * TN (ARUBA) parts.  Presumably consumed by the shared sumo RLC setup
 * code (sumo_rlc_init) via the size variable below — TODO confirm against
 * the rdev->rlc consumers.  Kept non-const because the consumer takes a
 * non-const pointer.
 */
static u32 tn_rlc_save_restore_register_list[] =
{
	0x98fc,
	0x98f0,
	0x9834,
	0x9838,
	0x9870,
	0x9874,
	0x8a14,
	0x8b24,
	0x8bcc,
	0x8b10,
	0x8c30,
	0x8d00,
	0x8d04,
	0x8c00,
	0x8c04,
	0x8c10,
	0x8c14,
	0x8d8c,
	0x8cf0,
	0x8e38,
	0x9508,
	0x9688,
	0x9608,
	0x960c,
	0x9610,
	0x9614,
	0x88c4,
	0x8978,
	0x88d4,
	0x900c,
	0x9100,
	0x913c,
	0x90e8,
	0x9354,
	0xa008,
	0x98f8,
	0x9148,
	0x914c,
	0x3f94,
	0x98f4,
	0x9b7c,
	0x3f8c,
	0x8950,
	0x8954,
	0x8a18,
	0x8b28,
	0x9144,
	0x3f90,
	0x915c,
	0x9160,
	0x9178,
	0x917c,
	0x9180,
	0x918c,
	0x9190,
	0x9194,
	0x9198,
	0x919c,
	0x91a8,
	0x91ac,
	0x91b0,
	0x91b4,
	0x91b8,
	0x91c4,
	0x91c8,
	0x91cc,
	0x91d0,
	0x91d4,
	0x91e0,
	0x91e4,
	0x91ec,
	0x91f0,
	0x91f4,
	0x9200,
	0x9204,
	0x929c,
	0x8030,
	0x9150,
	0x9a60,
	0x920c,
	0x9210,
	0x9228,
	0x922c,
	0x9244,
	0x9248,
	0x91e8,
	0x9294,
	0x9208,
	0x9224,
	0x9240,
	0x9220,
	0x923c,
	0x9258,
	0x9744,
	0xa200,
	0xa204,
	0xa208,
	0xa20c,
	0x8d58,
	0x9030,
	0x9034,
	0x9038,
	0x903c,
	0x9040,
	0x9654,
	0x897c,
	0xa210,
	0xa214,
	0x9868,
	0xa02c,
	0x9664,
	0x9698,
	0x949c,
	0x8e10,
	0x8e18,
	0x8c50,
	0x8c58,
	0x8c60,
	0x8c68,
	0x89b4,
	0x9830,
	0x802c,
};
/* Number of entries in the list above. */
static u32 tn_rlc_save_restore_register_list_size = ARRAY_SIZE(tn_rlc_save_restore_register_list);
164
/*
 * Helpers shared with the evergreen/sumo code (implemented in other
 * translation units; these families share most of the same hardware
 * blocks, so NI reuses their MC/IRQ/RLC plumbing).
 */
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void evergreen_program_aspm(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
178
/*
 * Firmware Names
 *
 * Declares the external firmware blobs this driver may request, so that
 * tooling (modinfo / initramfs builders) can bundle them.  BTC parts
 * (BARTS/TURKS/CAICOS) share one RLC image; ARUBA (TN) is an IGP and has
 * no MC or SMC firmware.
 */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BARTS_smc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/TURKS_smc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_smc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_smc.bin");
MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
201
202
/*
 * Golden register table for CAYMAN, second pass.  Rows are
 * { offset, mask, value } triples fed to radeon_program_register_sequence()
 * from ni_init_golden_registers() — presumably read-modify-write with the
 * mask selecting the bits to update; TODO confirm against that helper.
 */
static const u32 cayman_golden_registers2[] =
{
	0x3e5c, 0xffffffff, 0x00000000,
	0x3e48, 0xffffffff, 0x00000000,
	0x3e4c, 0xffffffff, 0x00000000,
	0x3e64, 0xffffffff, 0x00000000,
	0x3e50, 0xffffffff, 0x00000000,
	0x3e60, 0xffffffff, 0x00000000
};
212
/*
 * Primary golden register table for CAYMAN: { offset, mask, value }
 * triples applied via radeon_program_register_sequence() at init.
 */
static const u32 cayman_golden_registers[] =
{
	0x5eb4, 0xffffffff, 0x00000002,
	0x5e78, 0x8f311ff1, 0x001000f0,
	0x3f90, 0xffff0000, 0xff000000,
	0x9148, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0xc78, 0x00000080, 0x00000080,
	0xbd4, 0x70073777, 0x00011003,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x02011003,
	0x5bc0, 0x00200000, 0x50100000,
	0x98f8, 0x33773777, 0x02011003,
	0x98fc, 0xffffffff, 0x76541032,
	0x7030, 0x31000311, 0x00000011,
	0x2f48, 0x33773777, 0x42010001,
	0x6b28, 0x00000010, 0x00000012,
	0x7728, 0x00000010, 0x00000012,
	0x10328, 0x00000010, 0x00000012,
	0x10f28, 0x00000010, 0x00000012,
	0x11b28, 0x00000010, 0x00000012,
	0x12728, 0x00000010, 0x00000012,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x10c, 0x00000001, 0x00010003,
	0xa02c, 0xffffffff, 0x0000009b,
	0x913c, 0x0000010f, 0x01000100,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0x3700001f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88d0, 0xffffffff, 0x0f40df40,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};
253
/*
 * Golden register table, second pass, for ARUBA (TN) — applied for both
 * the dvst and scrapper variants.  { offset, mask, value } triples.
 */
static const u32 dvst_golden_registers2[] =
{
	0x8f8, 0xffffffff, 0,
	0x8fc, 0x00380000, 0,
	0x8f8, 0xffffffff, 1,
	0x8fc, 0x0e000000, 0
};
261
/*
 * Golden register table for the "dvst" subset of ARUBA (TN) parts
 * (selected by PCI device id in ni_init_golden_registers()).
 * { offset, mask, value } triples.
 */
static const u32 dvst_golden_registers[] =
{
	0x690, 0x3fff3fff, 0x20c00033,
	0x918c, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x9150, 0xffffdfff, 0x6e944040,
	0x917c, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x915c, 0x0fff0fff, 0x00010000,
	0x3f90, 0xffff0001, 0xff000000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9148, 0xffff0001, 0xff000000,
	0x9190, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x929c, 0x00000fff, 0x00000001,
	0x55e4, 0xff607fff, 0xfc000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffe, 0x00000000,
	0xd0c0, 0xff000fff, 0x00000100,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x12010001,
	0x5bb0, 0x000000f0, 0x00000070,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x00030000, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0xa008, 0xffffffff, 0x00010000,
	0x913c, 0xffff03ff, 0x01000100,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0xf700071f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x20ef8, 0x01ff01ff, 0x00000002,
	0x20e98, 0xfffffbff, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x8978, 0x3fffffff, 0x04050140,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};
319
/*
 * Golden register table for the remaining ("scrapper") ARUBA (TN) parts.
 * { offset, mask, value } triples; most rows are intentionally written
 * twice — presumably once per instance of a duplicated block, TODO
 * confirm against the hardware docs.
 */
static const u32 scrapper_golden_registers[] =
{
	0x690, 0x3fff3fff, 0x20c00033,
	0x918c, 0x0fff0fff, 0x00010006,
	0x918c, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x9150, 0xffffdfff, 0x6e944040,
	0x9150, 0xffffdfff, 0x6e944040,
	0x917c, 0x0fff0fff, 0x00030002,
	0x917c, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x915c, 0x0fff0fff, 0x00010000,
	0x915c, 0x0fff0fff, 0x00010000,
	0x3f90, 0xffff0001, 0xff000000,
	0x3f90, 0xffff0001, 0xff000000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9148, 0xffff0001, 0xff000000,
	0x9148, 0xffff0001, 0xff000000,
	0x9190, 0x0fff0fff, 0x00090008,
	0x9190, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x3f94, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x929c, 0x00000fff, 0x00000001,
	0x929c, 0x00000fff, 0x00000001,
	0x55e4, 0xff607fff, 0xfc000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffe, 0x00000000,
	0x9838, 0xfffffffe, 0x00000000,
	0xd0c0, 0xff000fff, 0x00000100,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x12010001,
	0xd0b8, 0x73773777, 0x12010001,
	0x5bb0, 0x000000f0, 0x00000070,
	0x98f8, 0x73773777, 0x12010001,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x00030000, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0x4d8, 0x00000fff, 0x00000100,
	0xa008, 0xffffffff, 0x00010000,
	0xa008, 0xffffffff, 0x00010000,
	0x913c, 0xffff03ff, 0x01000100,
	0x913c, 0xffff03ff, 0x01000100,
	0x90e8, 0x001fffff, 0x010400c0,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c30, 0x0000000f, 0x00040005,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x900c, 0x00ffffff, 0x0017071f,
	0x28350, 0x00000f01, 0x00000000,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0xf700071f, 0x00000002,
	0x9508, 0xf700071f, 0x00000002,
	0x9688, 0x00300000, 0x0017000f,
	0x960c, 0xffffffff, 0x54763210,
	0x960c, 0xffffffff, 0x54763210,
	0x20ef8, 0x01ff01ff, 0x00000002,
	0x20e98, 0xfffffbff, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x8978, 0x3fffffff, 0x04050140,
	0x8978, 0x3fffffff, 0x04050140,
	0x88d4, 0x0000001f, 0x00000010,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000,
	0x8974, 0xffffffff, 0x00000000
};
426
427 static void ni_init_golden_registers(struct radeon_device *rdev)
428 {
429         switch (rdev->family) {
430         case CHIP_CAYMAN:
431                 radeon_program_register_sequence(rdev,
432                                                  cayman_golden_registers,
433                                                  (const u32)ARRAY_SIZE(cayman_golden_registers));
434                 radeon_program_register_sequence(rdev,
435                                                  cayman_golden_registers2,
436                                                  (const u32)ARRAY_SIZE(cayman_golden_registers2));
437                 break;
438         case CHIP_ARUBA:
439                 if ((rdev->pdev->device == 0x9900) ||
440                     (rdev->pdev->device == 0x9901) ||
441                     (rdev->pdev->device == 0x9903) ||
442                     (rdev->pdev->device == 0x9904) ||
443                     (rdev->pdev->device == 0x9905) ||
444                     (rdev->pdev->device == 0x9906) ||
445                     (rdev->pdev->device == 0x9907) ||
446                     (rdev->pdev->device == 0x9908) ||
447                     (rdev->pdev->device == 0x9909) ||
448                     (rdev->pdev->device == 0x990A) ||
449                     (rdev->pdev->device == 0x990B) ||
450                     (rdev->pdev->device == 0x990C) ||
451                     (rdev->pdev->device == 0x990D) ||
452                     (rdev->pdev->device == 0x990E) ||
453                     (rdev->pdev->device == 0x990F) ||
454                     (rdev->pdev->device == 0x9910) ||
455                     (rdev->pdev->device == 0x9913) ||
456                     (rdev->pdev->device == 0x9917) ||
457                     (rdev->pdev->device == 0x9918)) {
458                         radeon_program_register_sequence(rdev,
459                                                          dvst_golden_registers,
460                                                          (const u32)ARRAY_SIZE(dvst_golden_registers));
461                         radeon_program_register_sequence(rdev,
462                                                          dvst_golden_registers2,
463                                                          (const u32)ARRAY_SIZE(dvst_golden_registers2));
464                 } else {
465                         radeon_program_register_sequence(rdev,
466                                                          scrapper_golden_registers,
467                                                          (const u32)ARRAY_SIZE(scrapper_golden_registers));
468                         radeon_program_register_sequence(rdev,
469                                                          dvst_golden_registers2,
470                                                          (const u32)ARRAY_SIZE(dvst_golden_registers2));
471                 }
472                 break;
473         default:
474                 break;
475         }
476 }
477
/* Number of { index, data } pairs in each io_mc table below. */
#define BTC_IO_MC_REGS_SIZE 29

/*
 * MC io debug register settings for BARTS: { MC_SEQ_IO_DEBUG_INDEX,
 * MC_SEQ_IO_DEBUG_DATA } pairs written by ni_mc_load_microcode() before
 * the MC ucode is streamed in.
 */
static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00946a00}
};
511
/*
 * MC io debug register settings for TURKS — identical to the BARTS table
 * except for the final (0x9f) entry.
 */
static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00936a00}
};
543
/*
 * MC io debug register settings for CAICOS — identical to the BARTS table
 * except for the final (0x9f) entry.
 */
static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00916a00}
};
575
/*
 * MC io debug register settings for CAYMAN — identical to the BARTS table
 * except for the final (0x9f) entry.
 */
static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00976b00}
};
607
608 int ni_mc_load_microcode(struct radeon_device *rdev)
609 {
610         const __be32 *fw_data;
611         u32 mem_type, running, blackout = 0;
612         u32 *io_mc_regs;
613         int i, ucode_size, regs_size;
614
615         if (!rdev->mc_fw)
616                 return -EINVAL;
617
618         switch (rdev->family) {
619         case CHIP_BARTS:
620                 io_mc_regs = (u32 *)&barts_io_mc_regs;
621                 ucode_size = BTC_MC_UCODE_SIZE;
622                 regs_size = BTC_IO_MC_REGS_SIZE;
623                 break;
624         case CHIP_TURKS:
625                 io_mc_regs = (u32 *)&turks_io_mc_regs;
626                 ucode_size = BTC_MC_UCODE_SIZE;
627                 regs_size = BTC_IO_MC_REGS_SIZE;
628                 break;
629         case CHIP_CAICOS:
630         default:
631                 io_mc_regs = (u32 *)&caicos_io_mc_regs;
632                 ucode_size = BTC_MC_UCODE_SIZE;
633                 regs_size = BTC_IO_MC_REGS_SIZE;
634                 break;
635         case CHIP_CAYMAN:
636                 io_mc_regs = (u32 *)&cayman_io_mc_regs;
637                 ucode_size = CAYMAN_MC_UCODE_SIZE;
638                 regs_size = BTC_IO_MC_REGS_SIZE;
639                 break;
640         }
641
642         mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
643         running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
644
645         if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
646                 if (running) {
647                         blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
648                         WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
649                 }
650
651                 /* reset the engine and set to writable */
652                 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
653                 WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
654
655                 /* load mc io regs */
656                 for (i = 0; i < regs_size; i++) {
657                         WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
658                         WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
659                 }
660                 /* load the MC ucode */
661                 fw_data = (const __be32 *)rdev->mc_fw->data;
662                 for (i = 0; i < ucode_size; i++)
663                         WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
664
665                 /* put the engine back into the active state */
666                 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
667                 WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
668                 WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
669
670                 /* wait for training to complete */
671                 for (i = 0; i < rdev->usec_timeout; i++) {
672                         if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
673                                 break;
674                         udelay(1);
675                 }
676
677                 if (running)
678                         WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
679         }
680
681         return 0;
682 }
683
684 int ni_init_microcode(struct radeon_device *rdev)
685 {
686         const char *chip_name;
687         const char *rlc_chip_name;
688         size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
689         size_t smc_req_size = 0;
690         char fw_name[30];
691         int err;
692
693         DRM_DEBUG("\n");
694
695         switch (rdev->family) {
696         case CHIP_BARTS:
697                 chip_name = "BARTS";
698                 rlc_chip_name = "BTC";
699                 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
700                 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
701                 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
702                 mc_req_size = BTC_MC_UCODE_SIZE * 4;
703                 smc_req_size = ALIGN(BARTS_SMC_UCODE_SIZE, 4);
704                 break;
705         case CHIP_TURKS:
706                 chip_name = "TURKS";
707                 rlc_chip_name = "BTC";
708                 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
709                 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
710                 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
711                 mc_req_size = BTC_MC_UCODE_SIZE * 4;
712                 smc_req_size = ALIGN(TURKS_SMC_UCODE_SIZE, 4);
713                 break;
714         case CHIP_CAICOS:
715                 chip_name = "CAICOS";
716                 rlc_chip_name = "BTC";
717                 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
718                 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
719                 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
720                 mc_req_size = BTC_MC_UCODE_SIZE * 4;
721                 smc_req_size = ALIGN(CAICOS_SMC_UCODE_SIZE, 4);
722                 break;
723         case CHIP_CAYMAN:
724                 chip_name = "CAYMAN";
725                 rlc_chip_name = "CAYMAN";
726                 pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
727                 me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
728                 rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
729                 mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
730                 smc_req_size = ALIGN(CAYMAN_SMC_UCODE_SIZE, 4);
731                 break;
732         case CHIP_ARUBA:
733                 chip_name = "ARUBA";
734                 rlc_chip_name = "ARUBA";
735                 /* pfp/me same size as CAYMAN */
736                 pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
737                 me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
738                 rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
739                 mc_req_size = 0;
740                 break;
741         default: BUG();
742         }
743
744         DRM_INFO("Loading %s Microcode\n", chip_name);
745
746         snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
747         err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
748         if (err)
749                 goto out;
750         if (rdev->pfp_fw->size != pfp_req_size) {
751                 printk(KERN_ERR
752                        "ni_cp: Bogus length %zu in firmware \"%s\"\n",
753                        rdev->pfp_fw->size, fw_name);
754                 err = -EINVAL;
755                 goto out;
756         }
757
758         snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
759         err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
760         if (err)
761                 goto out;
762         if (rdev->me_fw->size != me_req_size) {
763                 printk(KERN_ERR
764                        "ni_cp: Bogus length %zu in firmware \"%s\"\n",
765                        rdev->me_fw->size, fw_name);
766                 err = -EINVAL;
767         }
768
769         snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
770         err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
771         if (err)
772                 goto out;
773         if (rdev->rlc_fw->size != rlc_req_size) {
774                 printk(KERN_ERR
775                        "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
776                        rdev->rlc_fw->size, fw_name);
777                 err = -EINVAL;
778         }
779
780         /* no MC ucode on TN */
781         if (!(rdev->flags & RADEON_IS_IGP)) {
782                 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
783                 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
784                 if (err)
785                         goto out;
786                 if (rdev->mc_fw->size != mc_req_size) {
787                         printk(KERN_ERR
788                                "ni_mc: Bogus length %zu in firmware \"%s\"\n",
789                                rdev->mc_fw->size, fw_name);
790                         err = -EINVAL;
791                 }
792         }
793
794         if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
795                 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
796                 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
797                 if (err) {
798                         printk(KERN_ERR
799                                "smc: error loading firmware \"%s\"\n",
800                                fw_name);
801                         release_firmware(rdev->smc_fw);
802                         rdev->smc_fw = NULL;
803                 } else if (rdev->smc_fw->size != smc_req_size) {
804                         printk(KERN_ERR
805                                "ni_mc: Bogus length %zu in firmware \"%s\"\n",
806                                rdev->mc_fw->size, fw_name);
807                         err = -EINVAL;
808                 }
809         }
810
811 out:
812         if (err) {
813                 if (err != -EINVAL)
814                         printk(KERN_ERR
815                                "ni_cp: Failed to load firmware \"%s\"\n",
816                                fw_name);
817                 release_firmware(rdev->pfp_fw);
818                 rdev->pfp_fw = NULL;
819                 release_firmware(rdev->me_fw);
820                 rdev->me_fw = NULL;
821                 release_firmware(rdev->rlc_fw);
822                 rdev->rlc_fw = NULL;
823                 release_firmware(rdev->mc_fw);
824                 rdev->mc_fw = NULL;
825         }
826         return err;
827 }
828
829 int tn_get_temp(struct radeon_device *rdev)
830 {
831         u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff;
832         int actual_temp = (temp / 8) - 49;
833
834         return actual_temp * 1000;
835 }
836
837 /*
838  * Core functions
839  */
/**
 * cayman_gpu_init - static configuration of the Cayman/Aruba graphics core
 * @rdev: radeon device
 *
 * Fills in rdev->config.cayman from hard-coded per-family limits (and, for
 * Aruba, per PCI device id), derives the tiling configuration from the
 * golden GB_ADDR_CONFIG value plus the memory controller strap registers,
 * computes the disabled render-backend mask, and programs the hardware
 * defaults for the 3D engine.  Register write order follows the hardware
 * bring-up sequence and must not be rearranged.
 */
static void cayman_gpu_init(struct radeon_device *rdev)
{
        u32 gb_addr_config = 0;
        u32 mc_shared_chmap, mc_arb_ramcfg;
        u32 cgts_tcc_disable;
        u32 sx_debug_1;
        u32 smx_dc_ctl0;
        u32 cgts_sm_ctrl_reg;
        u32 hdp_host_path_cntl;
        u32 tmp;
        u32 disabled_rb_mask;
        int i, j;

        /* Per-family shader engine limits and the golden address config. */
        switch (rdev->family) {
        case CHIP_CAYMAN:
                rdev->config.cayman.max_shader_engines = 2;
                rdev->config.cayman.max_pipes_per_simd = 4;
                rdev->config.cayman.max_tile_pipes = 8;
                rdev->config.cayman.max_simds_per_se = 12;
                rdev->config.cayman.max_backends_per_se = 4;
                rdev->config.cayman.max_texture_channel_caches = 8;
                rdev->config.cayman.max_gprs = 256;
                rdev->config.cayman.max_threads = 256;
                rdev->config.cayman.max_gs_threads = 32;
                rdev->config.cayman.max_stack_entries = 512;
                rdev->config.cayman.sx_num_of_sets = 8;
                rdev->config.cayman.sx_max_export_size = 256;
                rdev->config.cayman.sx_max_export_pos_size = 64;
                rdev->config.cayman.sx_max_export_smx_size = 192;
                rdev->config.cayman.max_hw_contexts = 8;
                rdev->config.cayman.sq_num_cf_insts = 2;

                rdev->config.cayman.sc_prim_fifo_size = 0x100;
                rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
                gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_ARUBA:
        default:
                rdev->config.cayman.max_shader_engines = 1;
                rdev->config.cayman.max_pipes_per_simd = 4;
                rdev->config.cayman.max_tile_pipes = 2;
                /* SIMD and backend counts vary by APU SKU; keyed on PCI
                 * device id since there is no fuse register for this here.
                 */
                if ((rdev->pdev->device == 0x9900) ||
                    (rdev->pdev->device == 0x9901) ||
                    (rdev->pdev->device == 0x9905) ||
                    (rdev->pdev->device == 0x9906) ||
                    (rdev->pdev->device == 0x9907) ||
                    (rdev->pdev->device == 0x9908) ||
                    (rdev->pdev->device == 0x9909) ||
                    (rdev->pdev->device == 0x990B) ||
                    (rdev->pdev->device == 0x990C) ||
                    (rdev->pdev->device == 0x990F) ||
                    (rdev->pdev->device == 0x9910) ||
                    (rdev->pdev->device == 0x9917) ||
                    (rdev->pdev->device == 0x9999) ||
                    (rdev->pdev->device == 0x999C)) {
                        rdev->config.cayman.max_simds_per_se = 6;
                        rdev->config.cayman.max_backends_per_se = 2;
                } else if ((rdev->pdev->device == 0x9903) ||
                           (rdev->pdev->device == 0x9904) ||
                           (rdev->pdev->device == 0x990A) ||
                           (rdev->pdev->device == 0x990D) ||
                           (rdev->pdev->device == 0x990E) ||
                           (rdev->pdev->device == 0x9913) ||
                           (rdev->pdev->device == 0x9918) ||
                           (rdev->pdev->device == 0x999D)) {
                        rdev->config.cayman.max_simds_per_se = 4;
                        rdev->config.cayman.max_backends_per_se = 2;
                } else if ((rdev->pdev->device == 0x9919) ||
                           (rdev->pdev->device == 0x9990) ||
                           (rdev->pdev->device == 0x9991) ||
                           (rdev->pdev->device == 0x9994) ||
                           (rdev->pdev->device == 0x9995) ||
                           (rdev->pdev->device == 0x9996) ||
                           (rdev->pdev->device == 0x999A) ||
                           (rdev->pdev->device == 0x99A0)) {
                        rdev->config.cayman.max_simds_per_se = 3;
                        rdev->config.cayman.max_backends_per_se = 1;
                } else {
                        rdev->config.cayman.max_simds_per_se = 2;
                        rdev->config.cayman.max_backends_per_se = 1;
                }
                rdev->config.cayman.max_texture_channel_caches = 2;
                rdev->config.cayman.max_gprs = 256;
                rdev->config.cayman.max_threads = 256;
                rdev->config.cayman.max_gs_threads = 32;
                rdev->config.cayman.max_stack_entries = 512;
                rdev->config.cayman.sx_num_of_sets = 8;
                rdev->config.cayman.sx_max_export_size = 256;
                rdev->config.cayman.sx_max_export_pos_size = 64;
                rdev->config.cayman.sx_max_export_smx_size = 192;
                rdev->config.cayman.max_hw_contexts = 8;
                rdev->config.cayman.sq_num_cf_insts = 2;

                rdev->config.cayman.sc_prim_fifo_size = 0x40;
                rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
                gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
                break;
        }

        /* Initialize HDP */
        for (i = 0, j = 0; i < 32; i++, j += 0x18) {
                WREG32((0x2c14 + j), 0x00000000);
                WREG32((0x2c18 + j), 0x00000000);
                WREG32((0x2c1c + j), 0x00000000);
                WREG32((0x2c20 + j), 0x00000000);
                WREG32((0x2c24 + j), 0x00000000);
        }

        WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

        evergreen_fix_pci_max_read_req_size(rdev);

        mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
        mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

        /* Row size derived from DRAM column strap; capped at 4KB below. */
        tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
        rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
        if (rdev->config.cayman.mem_row_size_in_kb > 4)
                rdev->config.cayman.mem_row_size_in_kb = 4;
        /* XXX use MC settings? */
        rdev->config.cayman.shader_engine_tile_size = 32;
        rdev->config.cayman.num_gpus = 1;
        rdev->config.cayman.multi_gpu_tile_size = 64;

        /* Unpack the golden GB_ADDR_CONFIG into the cached config fields.
         * Note: row size and num_gpus set above are overwritten here on
         * purpose; the gb_addr_config encoding is authoritative.
         */
        tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
        rdev->config.cayman.num_tile_pipes = (1 << tmp);
        tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
        rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
        tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
        rdev->config.cayman.num_shader_engines = tmp + 1;
        tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
        rdev->config.cayman.num_gpus = tmp + 1;
        tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
        rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
        tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
        rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;


        /* setup tiling info dword.  gb_addr_config is not adequate since it does
         * not have bank info, so create a custom tiling dword.
         * bits 3:0   num_pipes
         * bits 7:4   num_banks
         * bits 11:8  group_size
         * bits 15:12 row_size
         */
        rdev->config.cayman.tile_config = 0;
        switch (rdev->config.cayman.num_tile_pipes) {
        case 1:
        default:
                rdev->config.cayman.tile_config |= (0 << 0);
                break;
        case 2:
                rdev->config.cayman.tile_config |= (1 << 0);
                break;
        case 4:
                rdev->config.cayman.tile_config |= (2 << 0);
                break;
        case 8:
                rdev->config.cayman.tile_config |= (3 << 0);
                break;
        }

        /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
        if (rdev->flags & RADEON_IS_IGP)
                rdev->config.cayman.tile_config |= 1 << 4;
        else {
                switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
                case 0: /* four banks */
                        rdev->config.cayman.tile_config |= 0 << 4;
                        break;
                case 1: /* eight banks */
                        rdev->config.cayman.tile_config |= 1 << 4;
                        break;
                case 2: /* sixteen banks */
                default:
                        rdev->config.cayman.tile_config |= 2 << 4;
                        break;
                }
        }
        rdev->config.cayman.tile_config |=
                ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
        rdev->config.cayman.tile_config |=
                ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

        /* Gather the per-SE render backend disable bits into one mask,
         * 4 bits per shader engine, highest SE in the low nibble shifted
         * up as lower SEs are read.
         */
        tmp = 0;
        for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
                u32 rb_disable_bitmap;

                WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
                WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
                rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
                tmp <<= 4;
                tmp |= rb_disable_bitmap;
        }
        /* enabled rb are just the one not disabled :) */
        disabled_rb_mask = tmp;
        tmp = 0;
        for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
                tmp |= (1 << i);
        /* if all the backends are disabled, fix it up here */
        if ((disabled_rb_mask & tmp) == tmp) {
                for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
                        disabled_rb_mask &= ~(1 << i);
        }

        /* Restore broadcast indexing after the per-SE reads above. */
        WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
        WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);

        /* Mirror the address config into every block that needs it. */
        WREG32(GB_ADDR_CONFIG, gb_addr_config);
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
        if (ASIC_IS_DCE6(rdev))
                WREG32(DMIF_ADDR_CALC, gb_addr_config);
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
        WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
        WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
        WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
        WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
        WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);

        /* Single-backend IGPs use a fixed backend map; otherwise remap
         * around the disabled backends.
         */
        if ((rdev->config.cayman.max_backends_per_se == 1) &&
            (rdev->flags & RADEON_IS_IGP)) {
                if ((disabled_rb_mask & 3) == 1) {
                        /* RB0 disabled, RB1 enabled */
                        tmp = 0x11111111;
                } else {
                        /* RB1 disabled, RB0 enabled */
                        tmp = 0x00000000;
                }
        } else {
                tmp = gb_addr_config & NUM_PIPES_MASK;
                tmp = r6xx_remap_render_backend(rdev, tmp,
                                                rdev->config.cayman.max_backends_per_se *
                                                rdev->config.cayman.max_shader_engines,
                                                CAYMAN_MAX_BACKENDS, disabled_rb_mask);
        }
        WREG32(GB_BACKEND_MAP, tmp);

        /* Enable only the texture channel caches this ASIC actually has
         * (disable bits live in the upper half of the register).
         */
        cgts_tcc_disable = 0xffff0000;
        for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
                cgts_tcc_disable &= ~(1 << (16 + i));
        WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
        WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
        WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
        WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);

        /* reprogram the shader complex */
        cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
        /* NOTE(review): 16 identical OVERRIDE writes, then the saved value
         * is restored; presumably each write touches a different internal
         * instance - kept as-is.
         */
        for (i = 0; i < 16; i++)
                WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
        WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);

        /* set HW defaults for 3D engine */
        WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

        sx_debug_1 = RREG32(SX_DEBUG_1);
        sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
        WREG32(SX_DEBUG_1, sx_debug_1);

        smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
        smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
        smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
        WREG32(SMX_DC_CTL0, smx_dc_ctl0);

        WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);

        /* need to be explicitly zero-ed */
        WREG32(VGT_OFFCHIP_LDS_BASE, 0);
        WREG32(SQ_LSTMP_RING_BASE, 0);
        WREG32(SQ_HSTMP_RING_BASE, 0);
        WREG32(SQ_ESTMP_RING_BASE, 0);
        WREG32(SQ_GSTMP_RING_BASE, 0);
        WREG32(SQ_VSTMP_RING_BASE, 0);
        WREG32(SQ_PSTMP_RING_BASE, 0);

        WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);

        /* Export buffer sizes are programmed in 4-dword granularity. */
        WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
                                        POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
                                        SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));

        WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
                                 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
                                 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));


        WREG32(VGT_NUM_INSTANCES, 1);

        WREG32(CP_PERFMON_CNTL, 0);

        WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
                                  FETCH_FIFO_HIWATER(0x4) |
                                  DONE_FIFO_HIWATER(0xe0) |
                                  ALU_UPDATE_FIFO_HIWATER(0x8)));

        WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
        WREG32(SQ_CONFIG, (VC_ENABLE |
                           EXPORT_SRC_C |
                           GFX_PRIO(0) |
                           CS1_PRIO(0) |
                           CS2_PRIO(1)));
        WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);

        WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
                                          FORCE_EOV_MAX_REZ_CNT(255)));

        WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
               AUTO_INVLD_EN(ES_AND_GS_AUTO));

        WREG32(VGT_GS_VERTEX_REUSE, 16);
        WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

        /* Reset the CB performance counter selectors. */
        WREG32(CB_PERF_CTR0_SEL_0, 0);
        WREG32(CB_PERF_CTR0_SEL_1, 0);
        WREG32(CB_PERF_CTR1_SEL_0, 0);
        WREG32(CB_PERF_CTR1_SEL_1, 0);
        WREG32(CB_PERF_CTR2_SEL_0, 0);
        WREG32(CB_PERF_CTR2_SEL_1, 0);
        WREG32(CB_PERF_CTR3_SEL_0, 0);
        WREG32(CB_PERF_CTR3_SEL_1, 0);

        tmp = RREG32(HDP_MISC_CNTL);
        tmp |= HDP_FLUSH_INVALIDATE_CACHE;
        WREG32(HDP_MISC_CNTL, tmp);

        /* Read-back/write-back to latch the host path config. */
        hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
        WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

        WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

        udelay(50);

        /* set clockgating golden values on TN */
        if (rdev->family == CHIP_ARUBA) {
                tmp = RREG32_CG(CG_CGTT_LOCAL_0);
                tmp &= ~0x00380000;
                WREG32_CG(CG_CGTT_LOCAL_0, tmp);
                tmp = RREG32_CG(CG_CGTT_LOCAL_1);
                tmp &= ~0x0e000000;
                WREG32_CG(CG_CGTT_LOCAL_1, tmp);
        }
}
1183
1184 /*
1185  * GART
1186  */
/**
 * cayman_pcie_gart_tlb_flush - flush the VM TLBs
 * @rdev: radeon device
 *
 * Flushes the HDP cache first so that any pending writes reach memory
 * before the TLBs of all eight VM contexts are invalidated.
 */
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
        /* flush hdp cache */
        WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

        /* bits 0-7 are the VM contexts0-7 */
        WREG32(VM_INVALIDATE_REQUEST, 1);
}
1195
/**
 * cayman_pcie_gart_enable - set up the PCIE GART and VM contexts
 * @rdev: radeon device
 *
 * Pins the GART page table in VRAM, programs the L1 TLB and L2 cache,
 * points VM context 0 at the system GART range, gives contexts 1-7 a
 * valid placeholder page table (replaced on the fly by the VM code in
 * radeon_gart.c), enables protection faults on context 1-7, and flushes
 * the TLBs.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
        int i, r;

        if (rdev->gart.robj == NULL) {
                dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
        r = radeon_gart_table_vram_pin(rdev);
        if (r)
                return r;
        radeon_gart_restore(rdev);
        /* Setup TLB control */
        WREG32(MC_VM_MX_L1_TLB_CNTL,
               (0xA << 7) |
               ENABLE_L1_TLB |
               ENABLE_L1_FRAGMENT_PROCESSING |
               SYSTEM_ACCESS_MODE_NOT_IN_SYS |
               ENABLE_ADVANCED_DRIVER_MODEL |
               SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
               ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
               ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
               EFFECTIVE_L2_QUEUE_SIZE(7) |
               CONTEXT1_IDENTITY_ACCESS_MODE(1));
        WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
        WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
               L2_CACHE_BIGK_FRAGMENT_SIZE(6));
        /* setup context0 */
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
        /* faults in context 0 fall back to the dummy page */
        WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
                        (u32)(rdev->dummy_page.addr >> 12));
        WREG32(VM_CONTEXT0_CNTL2, 0);
        WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
                                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

        /* NOTE(review): undocumented registers, zeroed per hardware
         * bring-up sequence.
         */
        WREG32(0x15D4, 0);
        WREG32(0x15D8, 0);
        WREG32(0x15DC, 0);

        /* empty context1-7 */
        /* Assign the pt base to something valid for now; the pts used for
         * the VMs are determined by the application and setup and assigned
         * on the fly in the vm part of radeon_gart.c
         */
        for (i = 1; i < 8; i++) {
                WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
                WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
                WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
                        rdev->gart.table_addr >> 12);
        }

        /* enable context1-7 */
        WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
               (u32)(rdev->dummy_page.addr >> 12));
        WREG32(VM_CONTEXT1_CNTL2, 4);
        WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
                                RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
                                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
                                DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
                                DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
                                PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
                                PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
                                VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
                                VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
                                READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
                                READ_PROTECTION_FAULT_ENABLE_DEFAULT |
                                WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
                                WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

        cayman_pcie_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(rdev->mc.gtt_size >> 20),
                 (unsigned long long)rdev->gart.table_addr);
        rdev->gart.ready = true;
        return 0;
}
1276
/**
 * cayman_pcie_gart_disable - tear down the PCIE GART
 * @rdev: radeon device
 *
 * Disables all VM contexts, turns off the L1 TLB and L2 cache (the
 * fragment-processing and queue-size bits are kept programmed), and
 * unpins the page table from VRAM.
 */
static void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
        /* Disable all tables */
        WREG32(VM_CONTEXT0_CNTL, 0);
        WREG32(VM_CONTEXT1_CNTL, 0);
        /* Setup TLB control */
        WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
               SYSTEM_ACCESS_MODE_NOT_IN_SYS |
               SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
               ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
               EFFECTIVE_L2_QUEUE_SIZE(7) |
               CONTEXT1_IDENTITY_ACCESS_MODE(1));
        WREG32(VM_L2_CNTL2, 0);
        WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
               L2_CACHE_BIGK_FRAGMENT_SIZE(6));
        radeon_gart_table_vram_unpin(rdev);
}
1296
/**
 * cayman_pcie_gart_fini - final GART teardown
 * @rdev: radeon device
 *
 * Disables the GART hardware first, then frees the page table BO and
 * the GART bookkeeping; order matters so the HW never points at freed
 * memory.
 */
static void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
        cayman_pcie_gart_disable(rdev);
        radeon_gart_table_vram_free(rdev);
        radeon_gart_fini(rdev);
}
1303
1304 void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
1305                               int ring, u32 cp_int_cntl)
1306 {
1307         u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
1308
1309         WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
1310         WREG32(CP_INT_CNTL, cp_int_cntl);
1311 }
1312
1313 /*
1314  * CP.
1315  */
/**
 * cayman_fence_ring_emit - emit a fence on the gfx ring
 * @rdev: radeon device
 * @fence: fence to emit
 *
 * Emits a SURFACE_SYNC to flush the TC/SH read caches for this vmid,
 * then an EVENT_WRITE_EOP that writes the fence sequence number to the
 * fence address and raises an interrupt.  Packet count here must stay
 * in sync with the ring-space reservation made by the caller.
 */
void cayman_fence_ring_emit(struct radeon_device *rdev,
                            struct radeon_fence *fence)
{
        struct radeon_ring *ring = &rdev->ring[fence->ring];
        u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

        /* flush read cache over gart for this vmid */
        radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
        radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
        radeon_ring_write(ring, 0xFFFFFFFF);
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 10); /* poll interval */
        /* EVENT_WRITE_EOP - flush caches, send int */
        radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
        radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
        radeon_ring_write(ring, addr & 0xffffffff);
        /* DATA_SEL(1) = write the 32-bit seq, INT_SEL(2) = int on write */
        radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
        radeon_ring_write(ring, fence->seq);
        radeon_ring_write(ring, 0);
}
1339
/**
 * cayman_ring_ib_execute - schedule an indirect buffer on a CP ring
 * @rdev: radeon device
 * @ib: indirect buffer to execute
 *
 * Forces DX10/11 mode, optionally records the expected read pointer in
 * the ring's rptr save scratch register, emits the INDIRECT_BUFFER
 * packet tagged with the IB's vmid, and flushes the read caches for
 * that vmid afterwards.
 */
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
        struct radeon_ring *ring = &rdev->ring[ib->ring];

        /* set to DX10/11 mode */
        radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
        radeon_ring_write(ring, 1);

        if (ring->rptr_save_reg) {
                /* 3 dwords for this write + 4 for the IB packet + 8 for
                 * the cache flush below = position after this IB launch */
                uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
                radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
                radeon_ring_write(ring, ((ring->rptr_save_reg - 
                                          PACKET3_SET_CONFIG_REG_START) >> 2));
                radeon_ring_write(ring, next_rptr);
        }

        radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
        radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
                          (2 << 0) |
#endif
                          (ib->gpu_addr & 0xFFFFFFFC));
        radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
        /* vmid 0 when the IB has no VM attached */
        radeon_ring_write(ring, ib->length_dw | 
                          (ib->vm ? (ib->vm->id << 24) : 0));

        /* flush read cache over gart for this vmid */
        radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
        radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
        radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
        radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
        radeon_ring_write(ring, 0xFFFFFFFF);
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 10); /* poll interval */
}
1376
/**
 * cayman_uvd_semaphore_emit - emit a semaphore command on the UVD ring
 * @rdev: radeon device
 * @ring: UVD ring
 * @semaphore: semaphore to emit
 * @emit_wait: true for a wait command, false for a signal command
 *
 * The semaphore GPU address is split across two 20-bit register fields
 * (low bits 22:3, high bits 42:23); the command register's bit 7 is
 * always set and bit 0 selects wait vs. signal.
 */
void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
                               struct radeon_ring *ring,
                               struct radeon_semaphore *semaphore,
                               bool emit_wait)
{
        uint64_t addr = semaphore->gpu_addr;

        radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
        radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);

        radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
        radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);

        radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
        radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
}
1393
1394 static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
1395 {
1396         if (enable)
1397                 WREG32(CP_ME_CNTL, 0);
1398         else {
1399                 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1400                 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
1401                 WREG32(SCRATCH_UMSK, 0);
1402                 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1403         }
1404 }
1405
/**
 * cayman_cp_load_microcode - upload PFP and ME microcode to the CP
 * @rdev: radeon device
 *
 * Halts the CP, streams the big-endian firmware images into the PFP
 * ucode RAM and the ME RAM word by word, then resets all ucode address
 * registers to 0 so execution starts from the beginning.
 *
 * Returns 0 on success, -EINVAL if the firmware was never loaded.
 */
static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
        const __be32 *fw_data;
        int i;

        if (!rdev->me_fw || !rdev->pfp_fw)
                return -EINVAL;

        cayman_cp_enable(rdev, false);

        /* PFP: auto-incrementing data port, rewind address afterwards */
        fw_data = (const __be32 *)rdev->pfp_fw->data;
        WREG32(CP_PFP_UCODE_ADDR, 0);
        for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
                WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
        WREG32(CP_PFP_UCODE_ADDR, 0);

        /* ME: same scheme via the RAM write-address port */
        fw_data = (const __be32 *)rdev->me_fw->data;
        WREG32(CP_ME_RAM_WADDR, 0);
        for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
                WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

        WREG32(CP_PFP_UCODE_ADDR, 0);
        WREG32(CP_ME_RAM_WADDR, 0);
        WREG32(CP_ME_RAM_RADDR, 0);
        return 0;
}
1432
/**
 * cayman_cp_start - initialize the CP and emit the clear state
 * @rdev: radeon device
 *
 * Sends ME_INITIALIZE on the gfx ring, enables the CP, then emits the
 * golden register state (cayman_default_state) bracketed by the
 * clear-state preamble packets.  The ring-lock sizes (7 and
 * cayman_default_size + 19) are hand-counted against the packets
 * emitted below and must be kept in sync with any packet changes.
 *
 * Returns 0 on success, negative error code if the ring can't be locked.
 */
static int cayman_cp_start(struct radeon_device *rdev)
{
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r, i;

        r = radeon_ring_lock(rdev, ring, 7);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
        }
        radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
        radeon_ring_write(ring, 0x1);
        radeon_ring_write(ring, 0x0);
        radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
        radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 0);
        radeon_ring_unlock_commit(rdev, ring);

        cayman_cp_enable(rdev, true);

        r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
        }

        /* setup clear context state */
        radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
        radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

        for (i = 0; i < cayman_default_size; i++)
                radeon_ring_write(ring, cayman_default_state[i]);

        radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
        radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

        /* set clear context state */
        radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
        radeon_ring_write(ring, 0);

        /* SQ_VTX_BASE_VTX_LOC */
        radeon_ring_write(ring, 0xc0026f00);
        radeon_ring_write(ring, 0x00000000);
        radeon_ring_write(ring, 0x00000000);
        radeon_ring_write(ring, 0x00000000);

        /* Clear consts */
        radeon_ring_write(ring, 0xc0036f00);
        radeon_ring_write(ring, 0x00000bc4);
        radeon_ring_write(ring, 0xffffffff);
        radeon_ring_write(ring, 0xffffffff);
        radeon_ring_write(ring, 0xffffffff);

        radeon_ring_write(ring, 0xc0026900);
        radeon_ring_write(ring, 0x00000316);
        radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
        radeon_ring_write(ring, 0x00000010); /*  */

        radeon_ring_unlock_commit(rdev, ring);

        /* XXX init other rings */

        return 0;
}
1498
/**
 * cayman_cp_fini - tear down the gfx command processor
 * @rdev: radeon device
 *
 * Halts the CP before freeing the gfx ring and releasing the scratch
 * register used for read-pointer save/restore.
 */
static void cayman_cp_fini(struct radeon_device *rdev)
{
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        cayman_cp_enable(rdev, false);
        radeon_ring_fini(rdev, ring);
        radeon_scratch_free(rdev, ring->rptr_save_reg);
}
1506
/* cayman_cp_resume - reset, program and start the three CP rings.
 *
 * Soft resets the CP (and the blocks that must be reset with it),
 * programs the ring-buffer registers for the gfx ring and the two
 * compute rings, then starts the CP and ring-tests cp0.
 * Returns 0 on success or the cp0 ring-test error.
 */
static int cayman_cp_resume(struct radeon_device *rdev)
{
	/* Per-ring register tables; all four arrays are indexed in
	 * lockstep with ridx[] (gfx, compute ring 1, compute ring 2).
	 */
	static const int ridx[] = {
		RADEON_RING_TYPE_GFX_INDEX,
		CAYMAN_RING_TYPE_CP1_INDEX,
		CAYMAN_RING_TYPE_CP2_INDEX
	};
	static const unsigned cp_rb_cntl[] = {
		CP_RB0_CNTL,
		CP_RB1_CNTL,
		CP_RB2_CNTL,
	};
	static const unsigned cp_rb_rptr_addr[] = {
		CP_RB0_RPTR_ADDR,
		CP_RB1_RPTR_ADDR,
		CP_RB2_RPTR_ADDR
	};
	static const unsigned cp_rb_rptr_addr_hi[] = {
		CP_RB0_RPTR_ADDR_HI,
		CP_RB1_RPTR_ADDR_HI,
		CP_RB2_RPTR_ADDR_HI
	};
	static const unsigned cp_rb_base[] = {
		CP_RB0_BASE,
		CP_RB1_BASE,
		CP_RB2_BASE
	};
	struct radeon_ring *ring;
	int i, r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);	/* read back to post the write */
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, (1 << 27));

	/* set the wb address whether it's enabled or not */
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
	WREG32(SCRATCH_UMSK, 0xff);

	for (i = 0; i < 3; ++i) {
		uint32_t rb_cntl;
		uint64_t addr;

		/* Set ring buffer size */
		ring = &rdev->ring[ridx[i]];
		rb_cntl = drm_order(ring->ring_size / 8);
		rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
#ifdef __BIG_ENDIAN
		rb_cntl |= BUF_SWAP_32BIT;
#endif
		WREG32(cp_rb_cntl[i], rb_cntl);

		/* set the wb address whether it's enabled or not */
		addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
		WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
		WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
	}

	/* set the rb base addr, this causes an internal reset of ALL rings */
	for (i = 0; i < 3; ++i) {
		ring = &rdev->ring[ridx[i]];
		WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
	}

	for (i = 0; i < 3; ++i) {
		/* Initialize the ring buffer's read and write pointers.
		 * RB_RPTR_WR_ENA is held only for the window in which the
		 * pointers are forced to zero, then dropped again.
		 */
		ring = &rdev->ring[ridx[i]];
		WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);

		ring->rptr = ring->wptr = 0;
		WREG32(ring->rptr_reg, ring->rptr);
		WREG32(ring->wptr_reg, ring->wptr);

		mdelay(1);
		WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
	}

	/* start the rings */
	cayman_cp_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	/* this only tests cp0; the compute rings stay marked !ready */
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
		return r;
	}

	return 0;
}
1615
/*
 * DMA
 * Starting with R600, the GPU has an asynchronous
 * DMA engine.  The programming model is very similar
 * to the 3D engine (ring buffer, IBs, etc.), but the
 * DMA controller has its own packet format that is
 * different from the PM4 format used by the 3D engine.
 * It supports copying data, writing embedded data,
 * solid fills, and a number of other things.  It also
 * has support for tiling/detiling of buffers.
 * Cayman and newer support two asynchronous DMA engines.
 */
1628 /**
1629  * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
1630  *
1631  * @rdev: radeon_device pointer
1632  * @ib: IB object to schedule
1633  *
1634  * Schedule an IB in the DMA ring (cayman-SI).
1635  */
1636 void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
1637                                 struct radeon_ib *ib)
1638 {
1639         struct radeon_ring *ring = &rdev->ring[ib->ring];
1640
1641         if (rdev->wb.enabled) {
1642                 u32 next_rptr = ring->wptr + 4;
1643                 while ((next_rptr & 7) != 5)
1644                         next_rptr++;
1645                 next_rptr += 3;
1646                 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
1647                 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
1648                 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
1649                 radeon_ring_write(ring, next_rptr);
1650         }
1651
1652         /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
1653          * Pad as necessary with NOPs.
1654          */
1655         while ((ring->wptr & 7) != 5)
1656                 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1657         radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
1658         radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
1659         radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
1660
1661 }
1662
1663 /**
1664  * cayman_dma_stop - stop the async dma engines
1665  *
1666  * @rdev: radeon_device pointer
1667  *
1668  * Stop the async dma engines (cayman-SI).
1669  */
1670 void cayman_dma_stop(struct radeon_device *rdev)
1671 {
1672         u32 rb_cntl;
1673
1674         radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1675
1676         /* dma0 */
1677         rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
1678         rb_cntl &= ~DMA_RB_ENABLE;
1679         WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
1680
1681         /* dma1 */
1682         rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
1683         rb_cntl &= ~DMA_RB_ENABLE;
1684         WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
1685
1686         rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
1687         rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
1688 }
1689
/**
 * cayman_dma_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Soft resets both engines, then programs and enables each ring
 * buffer and ring-tests it. (cayman-SI).
 * Returns 0 for success, error for failure.
 */
int cayman_dma_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 rb_cntl, dma_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 reg_offset, wb_offset;
	int i, r;

	/* Reset dma */
	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
	RREG32(SRBM_SOFT_RESET);	/* read back to post the write */
	udelay(50);
	WREG32(SRBM_SOFT_RESET, 0);

	/* i == 0 programs dma0, i == 1 programs dma1 */
	for (i = 0; i < 2; i++) {
		if (i == 0) {
			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
			reg_offset = DMA0_REGISTER_OFFSET;
			wb_offset = R600_WB_DMA_RPTR_OFFSET;
		} else {
			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
			reg_offset = DMA1_REGISTER_OFFSET;
			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
		}

		/* zero the semaphore timers */
		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = drm_order(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(DMA_RB_RPTR + reg_offset, 0);
		WREG32(DMA_RB_WPTR + reg_offset, 0);

		/* set the wb address whether it's enabled or not */
		WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
		WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		if (rdev->wb.enabled)
			rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

		WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);

		/* enable DMA IBs */
		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
#ifdef __BIG_ENDIAN
		ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
		WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);

		/* mask off the context-empty interrupt */
		dma_cntl = RREG32(DMA_CNTL + reg_offset);
		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
		WREG32(DMA_CNTL + reg_offset, dma_cntl);

		ring->wptr = 0;
		WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);

		ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;

		/* enable the ring last, once everything else is programmed */
		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);

		ring->ready = true;

		r = radeon_ring_test(rdev, ring->idx, ring);
		if (r) {
			ring->ready = false;
			return r;
		}
	}

	/* engines are up; expose the full VRAM size again */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}
1780
1781 /**
1782  * cayman_dma_fini - tear down the async dma engines
1783  *
1784  * @rdev: radeon_device pointer
1785  *
1786  * Stop the async dma engines and free the rings (cayman-SI).
1787  */
1788 void cayman_dma_fini(struct radeon_device *rdev)
1789 {
1790         cayman_dma_stop(rdev);
1791         radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
1792         radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
1793 }
1794
/* cayman_gpu_check_soft_reset - build a mask of blocks that need a reset.
 *
 * Samples the status registers (GRBM, DMA0/1, SRBM, SRBM2, VM L2) and
 * translates each busy/pending indication into the matching
 * RADEON_RESET_* flag for cayman_gpu_soft_reset().  An MC indication
 * is deliberately dropped at the end (see comment below).
 * Returns the accumulated mask; 0 means the GPU looks idle.
 */
static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(GRBM_STATUS);
	if (tmp & (PA_BUSY | SC_BUSY |
		   SH_BUSY | SX_BUSY |
		   TA_BUSY | VGT_BUSY |
		   DB_BUSY | CB_BUSY |
		   GDS_BUSY | SPI_BUSY |
		   IA_BUSY | IA_BUSY_NO_DMA))
		reset_mask |= RADEON_RESET_GFX;

	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
		   CP_BUSY | CP_COHERENCY_BUSY))
		reset_mask |= RADEON_RESET_CP;

	if (tmp & GRBM_EE_BUSY)
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

	/* DMA_STATUS_REG 0 */
	tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* DMA_STATUS_REG 1 */
	tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS2 */
	tmp = RREG32(SRBM_STATUS2);
	if (tmp & DMA_BUSY)
		reset_mask |= RADEON_RESET_DMA;

	if (tmp & DMA1_BUSY)
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS */
	tmp = RREG32(SRBM_STATUS);
	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
		reset_mask |= RADEON_RESET_RLC;

	if (tmp & IH_BUSY)
		reset_mask |= RADEON_RESET_IH;

	if (tmp & SEM_BUSY)
		reset_mask |= RADEON_RESET_SEM;

	if (tmp & GRBM_RQ_PENDING)
		reset_mask |= RADEON_RESET_GRBM;

	if (tmp & VMC_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
		   MCC_BUSY | MCD_BUSY))
		reset_mask |= RADEON_RESET_MC;

	if (evergreen_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* VM_L2_STATUS */
	tmp = RREG32(VM_L2_STATUS);
	if (tmp & L2_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	/* Skip MC reset as it's mostly likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}
1872
/* cayman_gpu_soft_reset - soft reset the blocks selected by @reset_mask.
 *
 * Dumps status registers for debugging, halts the CP and any DMA
 * engine slated for reset, stops the MC while the reset pulses are
 * applied, then toggles the appropriate GRBM/SRBM soft-reset bits
 * and restores the MC.  No-op when @reset_mask is 0.
 */
static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct evergreen_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	evergreen_print_gpu_status_regs(rdev);
	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(0x14F8));
	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14D8));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(0x14FC));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14DC));

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	if (reset_mask & RADEON_RESET_DMA) {
		/* dma0 */
		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
	}

	if (reset_mask & RADEON_RESET_DMA1) {
		/* dma1 */
		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
	}

	udelay(50);

	/* stop the MC so memory traffic is quiesced during the reset */
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
		grbm_soft_reset = SOFT_RESET_CB |
			SOFT_RESET_DB |
			SOFT_RESET_GDS |
			SOFT_RESET_PA |
			SOFT_RESET_SC |
			SOFT_RESET_SPI |
			SOFT_RESET_SH |
			SOFT_RESET_SX |
			SOFT_RESET_TC |
			SOFT_RESET_TA |
			SOFT_RESET_VGT |
			SOFT_RESET_IA;
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;

		srbm_soft_reset |= SOFT_RESET_GRBM;
	}

	if (reset_mask & RADEON_RESET_DMA)
		srbm_soft_reset |= SOFT_RESET_DMA;

	if (reset_mask & RADEON_RESET_DMA1)
		srbm_soft_reset |= SOFT_RESET_DMA1;

	if (reset_mask & RADEON_RESET_DISPLAY)
		srbm_soft_reset |= SOFT_RESET_DC;

	if (reset_mask & RADEON_RESET_RLC)
		srbm_soft_reset |= SOFT_RESET_RLC;

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= SOFT_RESET_SEM;

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= SOFT_RESET_IH;

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= SOFT_RESET_GRBM;

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= SOFT_RESET_VMC;

	/* don't reset the MC block on IGPs */
	if (!(rdev->flags & RADEON_IS_IGP)) {
		if (reset_mask & RADEON_RESET_MC)
			srbm_soft_reset |= SOFT_RESET_MC;
	}

	if (grbm_soft_reset) {
		/* assert the reset bits, read back to post, hold, release */
		tmp = RREG32(GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		/* same assert/hold/release sequence for the SRBM bits */
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	evergreen_mc_resume(rdev, &save);
	udelay(50);

	evergreen_print_gpu_status_regs(rdev);
}
2004
2005 int cayman_asic_reset(struct radeon_device *rdev)
2006 {
2007         u32 reset_mask;
2008
2009         reset_mask = cayman_gpu_check_soft_reset(rdev);
2010
2011         if (reset_mask)
2012                 r600_set_bios_scratch_engine_hung(rdev, true);
2013
2014         cayman_gpu_soft_reset(rdev, reset_mask);
2015
2016         reset_mask = cayman_gpu_check_soft_reset(rdev);
2017
2018         if (!reset_mask)
2019                 r600_set_bios_scratch_engine_hung(rdev, false);
2020
2021         return 0;
2022 }
2023
2024 /**
2025  * cayman_gfx_is_lockup - Check if the GFX engine is locked up
2026  *
2027  * @rdev: radeon_device pointer
2028  * @ring: radeon_ring structure holding ring information
2029  *
2030  * Check if the GFX engine is locked up.
2031  * Returns true if the engine appears to be locked up, false if not.
2032  */
2033 bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2034 {
2035         u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
2036
2037         if (!(reset_mask & (RADEON_RESET_GFX |
2038                             RADEON_RESET_COMPUTE |
2039                             RADEON_RESET_CP))) {
2040                 radeon_ring_lockup_update(ring);
2041                 return false;
2042         }
2043         /* force CP activities */
2044         radeon_ring_force_activity(rdev, ring);
2045         return radeon_ring_test_lockup(rdev, ring);
2046 }
2047
2048 /**
2049  * cayman_dma_is_lockup - Check if the DMA engine is locked up
2050  *
2051  * @rdev: radeon_device pointer
2052  * @ring: radeon_ring structure holding ring information
2053  *
2054  * Check if the async DMA engine is locked up.
2055  * Returns true if the engine appears to be locked up, false if not.
2056  */
2057 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2058 {
2059         u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
2060         u32 mask;
2061
2062         if (ring->idx == R600_RING_TYPE_DMA_INDEX)
2063                 mask = RADEON_RESET_DMA;
2064         else
2065                 mask = RADEON_RESET_DMA1;
2066
2067         if (!(reset_mask & mask)) {
2068                 radeon_ring_lockup_update(ring);
2069                 return false;
2070         }
2071         /* force ring activities */
2072         radeon_ring_force_activity(rdev, ring);
2073         return radeon_ring_test_lockup(rdev, ring);
2074 }
2075
/* cayman_startup - bring the asic to a fully running state.
 *
 * Loads firmware, programs the MC, enables GART and writeback, starts
 * the fence drivers, interrupts and all rings (gfx, the CP1/CP2 fence
 * rings, both DMA engines and, when available, UVD), then initializes
 * the IB pool, the VM manager and audio.  Called from init and resume.
 * Returns 0 on success or a negative error code.
 */
static int cayman_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);
	/* enable aspm */
	evergreen_program_aspm(rdev);

	evergreen_mc_program(rdev);

	if (rdev->flags & RADEON_IS_IGP) {
		/* IGPs have no separate MC firmware to check or load */
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}

		r = ni_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	r = cayman_pcie_gart_enable(rdev);
	if (r)
		return r;
	cayman_gpu_init(rdev);

	/* blitter failure is non-fatal: fall back to CPU copies */
	r = evergreen_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy.copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate rlc buffers */
	if (rdev->flags & RADEON_IS_IGP) {
		rdev->rlc.reg_list = tn_rlc_save_restore_register_list;
		rdev->rlc.reg_list_size = tn_rlc_save_restore_register_list_size;
		rdev->rlc.cs_data = cayman_cs_data;
		r = sumo_rlc_init(rdev);
		if (r) {
			DRM_ERROR("Failed to init rlc BOs!\n");
			return r;
		}
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* UVD failure is non-fatal: a zero ring size disables it below */
	r = rv770_uvd_resume(rdev);
	if (!r) {
		r = radeon_fence_driver_start_ring(rdev,
						   R600_RING_TYPE_UVD_INDEX);
		if (r)
			dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
	}
	if (r)
		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	/* gfx ring */
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     CP_RB0_RPTR, CP_RB0_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;

	/* dma0 ring */
	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
			     DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	if (r)
		return r;

	/* dma1 ring */
	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
			     DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
			     DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	if (r)
		return r;

	r = cayman_cp_load_microcode(rdev);
	if (r)
		return r;
	r = cayman_cp_resume(rdev);
	if (r)
		return r;

	r = cayman_dma_resume(rdev);
	if (r)
		return r;

	/* UVD ring, only when rv770_uvd_resume() above succeeded */
	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	if (ring->ring_size) {
		r = radeon_ring_init(rdev, ring, ring->ring_size,
				     R600_WB_UVD_RPTR_OFFSET,
				     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
				     0, 0xfffff, RADEON_CP_PACKET2);
		if (!r)
			r = r600_uvd_init(rdev);
		if (r)
			DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_vm_manager_init(rdev);
	if (r) {
		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r)
		return r;

	return 0;
}
2263
2264 int cayman_resume(struct radeon_device *rdev)
2265 {
2266         int r;
2267
2268         /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
2269          * posting will perform necessary task to bring back GPU into good
2270          * shape.
2271          */
2272         /* post card */
2273         atom_asic_init(rdev->mode_info.atom_context);
2274
2275         /* init golden registers */
2276         ni_init_golden_registers(rdev);
2277
2278         rdev->accel_working = true;
2279         r = cayman_startup(rdev);
2280         if (r) {
2281                 DRM_ERROR("cayman startup failed on resume\n");
2282                 rdev->accel_working = false;
2283                 return r;
2284         }
2285         return r;
2286 }
2287
/* cayman_suspend - quiesce the asic before suspend.
 *
 * Tears down audio and the VM manager, halts the CP, DMA and UVD
 * engines, suspends interrupts, then disables writeback and the GART.
 * Always returns 0.
 */
int cayman_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	radeon_vm_manager_fini(rdev);
	/* stop all command engines before touching wb/GART below */
	cayman_cp_enable(rdev, false);
	cayman_dma_stop(rdev);
	r600_uvd_stop(rdev);
	radeon_uvd_suspend(rdev);
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	cayman_pcie_gart_disable(rdev);
	return 0;
}
2301
/* Plan is to move initialization in that function and use
 * helper function so that radeon_device_init pretty much
 * do nothing more than calling asic specific function. This
 * should also allow to remove a bunch of callback function
 * like vram_info.
 */
/**
 * cayman_init - asic specific driver and hw init
 *
 * @rdev: radeon_device pointer
 *
 * Read and validate the (ATOM) BIOS, post the card if needed, set up
 * clocks, fences, the memory controller, the rings, IH and GART, then
 * bring the hw up via cayman_startup().  A startup failure only
 * disables acceleration (rdev->accel_working = false) rather than
 * failing init.  Returns 0 on success, negative error code on a
 * fatal setup failure.
 */
int cayman_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* init golden registers */
	ni_init_golden_registers(rdev);
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	/* 1MB GFX ring */
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	/* two 64KB async DMA rings */
	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	/* UVD is optional: only set up its ring if the block came up */
	r = radeon_uvd_init(rdev);
	if (!r) {
		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
		ring->ring_obj = NULL;
		r600_ring_init(rdev, ring, 4096);
	}

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		/* not fatal for modesetting - tear the accel blocks
		 * back down and continue without acceleration
		 */
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		cayman_cp_fini(rdev);
		cayman_dma_fini(rdev);
		r600_irq_fini(rdev);
		if (rdev->flags & RADEON_IS_IGP)
			sumo_rlc_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_vm_manager_fini(rdev);
		radeon_irq_kms_fini(rdev);
		cayman_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 *
	 * We can skip this check for TN, because there is no MC
	 * ucode.
	 */
	if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}
2413
/**
 * cayman_fini - asic specific driver and hw tear down
 *
 * @rdev: radeon_device pointer
 *
 * Undo everything cayman_init()/cayman_startup() set up and free the
 * BIOS copy last.  Tear-down order mirrors the bring-up dependencies
 * (engines before IRQs/WB, GART before memory manager).
 */
void cayman_fini(struct radeon_device *rdev)
{
	r600_blit_fini(rdev);
	cayman_cp_fini(rdev);
	cayman_dma_fini(rdev);
	r600_irq_fini(rdev);
	if (rdev->flags & RADEON_IS_IGP)
		sumo_rlc_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_uvd_stop(rdev);
	radeon_uvd_fini(rdev);
	cayman_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;	/* guard against use after free */
}
2437
2438 /*
2439  * vm
2440  */
2441 int cayman_vm_init(struct radeon_device *rdev)
2442 {
2443         /* number of VMs */
2444         rdev->vm_manager.nvm = 8;
2445         /* base offset of vram pages */
2446         if (rdev->flags & RADEON_IS_IGP) {
2447                 u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
2448                 tmp <<= 22;
2449                 rdev->vm_manager.vram_base_offset = tmp;
2450         } else
2451                 rdev->vm_manager.vram_base_offset = 0;
2452         return 0;
2453 }
2454
/**
 * cayman_vm_fini - vm manager asic specific tear down
 *
 * @rdev: radeon_device pointer
 *
 * Intentionally empty: cayman_vm_init() only fills in vm_manager
 * fields and allocates nothing, so there is nothing to release.
 */
void cayman_vm_fini(struct radeon_device *rdev)
{
}
2458
2459 /**
2460  * cayman_vm_decode_fault - print human readable fault info
2461  *
2462  * @rdev: radeon_device pointer
2463  * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
2464  * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
2465  *
2466  * Print human readable fault information (cayman/TN).
2467  */
2468 void cayman_vm_decode_fault(struct radeon_device *rdev,
2469                             u32 status, u32 addr)
2470 {
2471         u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
2472         u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
2473         u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
2474         char *block;
2475
2476         switch (mc_id) {
2477         case 32:
2478         case 16:
2479         case 96:
2480         case 80:
2481         case 160:
2482         case 144:
2483         case 224:
2484         case 208:
2485                 block = "CB";
2486                 break;
2487         case 33:
2488         case 17:
2489         case 97:
2490         case 81:
2491         case 161:
2492         case 145:
2493         case 225:
2494         case 209:
2495                 block = "CB_FMASK";
2496                 break;
2497         case 34:
2498         case 18:
2499         case 98:
2500         case 82:
2501         case 162:
2502         case 146:
2503         case 226:
2504         case 210:
2505                 block = "CB_CMASK";
2506                 break;
2507         case 35:
2508         case 19:
2509         case 99:
2510         case 83:
2511         case 163:
2512         case 147:
2513         case 227:
2514         case 211:
2515                 block = "CB_IMMED";
2516                 break;
2517         case 36:
2518         case 20:
2519         case 100:
2520         case 84:
2521         case 164:
2522         case 148:
2523         case 228:
2524         case 212:
2525                 block = "DB";
2526                 break;
2527         case 37:
2528         case 21:
2529         case 101:
2530         case 85:
2531         case 165:
2532         case 149:
2533         case 229:
2534         case 213:
2535                 block = "DB_HTILE";
2536                 break;
2537         case 38:
2538         case 22:
2539         case 102:
2540         case 86:
2541         case 166:
2542         case 150:
2543         case 230:
2544         case 214:
2545                 block = "SX";
2546                 break;
2547         case 39:
2548         case 23:
2549         case 103:
2550         case 87:
2551         case 167:
2552         case 151:
2553         case 231:
2554         case 215:
2555                 block = "DB_STEN";
2556                 break;
2557         case 40:
2558         case 24:
2559         case 104:
2560         case 88:
2561         case 232:
2562         case 216:
2563         case 168:
2564         case 152:
2565                 block = "TC_TFETCH";
2566                 break;
2567         case 41:
2568         case 25:
2569         case 105:
2570         case 89:
2571         case 233:
2572         case 217:
2573         case 169:
2574         case 153:
2575                 block = "TC_VFETCH";
2576                 break;
2577         case 42:
2578         case 26:
2579         case 106:
2580         case 90:
2581         case 234:
2582         case 218:
2583         case 170:
2584         case 154:
2585                 block = "VC";
2586                 break;
2587         case 112:
2588                 block = "CP";
2589                 break;
2590         case 113:
2591         case 114:
2592                 block = "SH";
2593                 break;
2594         case 115:
2595                 block = "VGT";
2596                 break;
2597         case 178:
2598                 block = "IH";
2599                 break;
2600         case 51:
2601                 block = "RLC";
2602                 break;
2603         case 55:
2604                 block = "DMA";
2605                 break;
2606         case 56:
2607                 block = "HDP";
2608                 break;
2609         default:
2610                 block = "unknown";
2611                 break;
2612         }
2613
2614         printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
2615                protections, vmid, addr,
2616                (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
2617                block, mc_id);
2618 }
2619
/* Hardware page table entry bits; cayman_vm_page_flags() translates
 * the generic RADEON_VM_PAGE_* flags into these.
 */
#define R600_ENTRY_VALID   (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)
2625
2626 uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
2627 {
2628         uint32_t r600_flags = 0;
2629         r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
2630         r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
2631         r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
2632         if (flags & RADEON_VM_PAGE_SYSTEM) {
2633                 r600_flags |= R600_PTE_SYSTEM;
2634                 r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
2635         }
2636         return r600_flags;
2637 }
2638
/**
 * cayman_vm_set_page - update the page tables using the CP
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the CP (cayman/TN).  The engine used
 * depends on rdev->asic->vm.pt_ring_index: the GFX ring gets
 * PACKET3_ME_WRITE packets, any other ring gets async DMA packets.
 */
void cayman_vm_set_page(struct radeon_device *rdev,
			struct radeon_ib *ib,
			uint64_t pe,
			uint64_t addr, unsigned count,
			uint32_t incr, uint32_t flags)
{
	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
	uint64_t value;
	unsigned ndw;

	if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
		/* CP path: ME_WRITE header + dst address, then two dwords
		 * per 64-bit PTE; ndw is capped at 0x3FFF dwords, so large
		 * updates are split into multiple packets.
		 */
		while (count) {
			ndw = 1 + count * 2;
			if (ndw > 0x3FFF)
				ndw = 0x3FFF;

			ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
			ib->ptr[ib->length_dw++] = pe;
			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
			for (; ndw > 1; ndw -= 2, --count, pe += 8) {
				if (flags & RADEON_VM_PAGE_SYSTEM) {
					/* system pages are remapped through the GART */
					value = radeon_vm_map_gart(rdev, addr);
					value &= 0xFFFFFFFFFFFFF000ULL; /* keep page-aligned address */
				} else if (flags & RADEON_VM_PAGE_VALID) {
					value = addr;
				} else {
					value = 0;
				}
				addr += incr;
				value |= r600_flags;
				ib->ptr[ib->length_dw++] = value;
				ib->ptr[ib->length_dw++] = upper_32_bits(value);
			}
		}
	} else {
		if ((flags & RADEON_VM_PAGE_SYSTEM) ||
		    (count == 1)) {
			/* DMA WRITE path: two dwords per entry, at most
			 * 0xFFFFE dwords per packet.
			 */
			while (count) {
				ndw = count * 2;
				if (ndw > 0xFFFFE)
					ndw = 0xFFFFE;

				/* for non-physically contiguous pages (system) */
				ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
				ib->ptr[ib->length_dw++] = pe;
				ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
				for (; ndw > 0; ndw -= 2, --count, pe += 8) {
					if (flags & RADEON_VM_PAGE_SYSTEM) {
						value = radeon_vm_map_gart(rdev, addr);
						value &= 0xFFFFFFFFFFFFF000ULL;
					} else if (flags & RADEON_VM_PAGE_VALID) {
						value = addr;
					} else {
						value = 0;
					}
					addr += incr;
					value |= r600_flags;
					ib->ptr[ib->length_dw++] = value;
					ib->ptr[ib->length_dw++] = upper_32_bits(value);
				}
			}
			/* pad the IB length to a multiple of 8 dwords with NOPs */
			while (ib->length_dw & 0x7)
				ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
		} else {
			while (count) {
				ndw = count * 2;
				if (ndw > 0xFFFFE)
					ndw = 0xFFFFE;

				if (flags & RADEON_VM_PAGE_VALID)
					value = addr;
				else
					value = 0;
				/* for physically contiguous pages (vram): single
				 * PTE_PDE packet carrying base value, flag mask
				 * and per-entry increment
				 */
				ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
				ib->ptr[ib->length_dw++] = pe; /* dst addr */
				ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
				ib->ptr[ib->length_dw++] = r600_flags; /* mask */
				ib->ptr[ib->length_dw++] = 0;
				ib->ptr[ib->length_dw++] = value; /* value */
				ib->ptr[ib->length_dw++] = upper_32_bits(value);
				ib->ptr[ib->length_dw++] = incr; /* increment size */
				ib->ptr[ib->length_dw++] = 0;
				pe += ndw * 4;
				addr += (ndw / 2) * incr;
				count -= ndw / 2;
			}
		}
		/* pad the IB length to a multiple of 8 dwords with NOPs */
		while (ib->length_dw & 0x7)
			ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
	}
}
2744
/**
 * cayman_vm_flush - vm flush using the CP
 *
 * @rdev: radeon_device pointer
 * @ridx: ring index to emit the flush on
 * @vm: VM to flush; NULL is a no-op
 *
 * Update the page table base and flush the VM TLB
 * using the CP (cayman-si).
 */
void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
	struct radeon_ring *ring = &rdev->ring[ridx];

	if (vm == NULL)
		return;

	/* point this VM context's register at its page directory */
	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	radeon_ring_write(ring, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
	radeon_ring_write(ring, 1 << vm->id);

	/* sync PFP to ME, otherwise we might get invalid PFP reads */
	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
	radeon_ring_write(ring, 0x0);
}
2775
/**
 * cayman_dma_vm_flush - vm flush using the async DMA engine
 *
 * @rdev: radeon_device pointer
 * @ridx: ring index to emit the flush on
 * @vm: VM to flush; NULL is a no-op
 *
 * Emit the same page table base update, HDP flush and TLB invalidate
 * as cayman_vm_flush(), but as SRBM register writes on a DMA ring.
 */
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
	struct radeon_ring *ring = &rdev->ring[ridx];

	if (vm == NULL)
		return;

	/* point this VM context's register at its page directory */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);

	/* bits 0-7 are the VM contexts0-7 */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
	radeon_ring_write(ring, 1 << vm->id);
}
2797