drm/radeon: fix halting UVD
[linux-imx.git] / drivers/gpu/drm/radeon/cik.c
index 3e32b145341fee4e509f07f949ae58edae8b5fd8..524db70aaf6e71dc384fedd2da5d81cae143bb5d 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -22,7 +22,6 @@
  * Authors: Alex Deucher
  */
 #include <linux/firmware.h>
-#include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include "drmP.h"
@@ -99,6 +98,439 @@ void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
        (void)RREG32(PCIE_DATA);
 }
 
+static const u32 bonaire_golden_spm_registers[] =
+{
+       0x30800, 0xe0ffffff, 0xe0000000
+};
+
+static const u32 bonaire_golden_common_registers[] =
+{
+       0xc770, 0xffffffff, 0x00000800,
+       0xc774, 0xffffffff, 0x00000800,
+       0xc798, 0xffffffff, 0x00007fbf,
+       0xc79c, 0xffffffff, 0x00007faf
+};
+
+static const u32 bonaire_golden_registers[] =
+{
+       0x3354, 0x00000333, 0x00000333,
+       0x3350, 0x000c0fc0, 0x00040200,
+       0x9a10, 0x00010000, 0x00058208,
+       0x3c000, 0xffff1fff, 0x00140000,
+       0x3c200, 0xfdfc0fff, 0x00000100,
+       0x3c234, 0x40000000, 0x40000200,
+       0x9830, 0xffffffff, 0x00000000,
+       0x9834, 0xf00fffff, 0x00000400,
+       0x9838, 0x0002021c, 0x00020200,
+       0xc78, 0x00000080, 0x00000000,
+       0x5bb0, 0x000000f0, 0x00000070,
+       0x5bc0, 0xf0311fff, 0x80300000,
+       0x98f8, 0x73773777, 0x12010001,
+       0x350c, 0x00810000, 0x408af000,
+       0x7030, 0x31000111, 0x00000011,
+       0x2f48, 0x73773777, 0x12010001,
+       0x220c, 0x00007fb6, 0x0021a1b1,
+       0x2210, 0x00007fb6, 0x002021b1,
+       0x2180, 0x00007fb6, 0x00002191,
+       0x2218, 0x00007fb6, 0x002121b1,
+       0x221c, 0x00007fb6, 0x002021b1,
+       0x21dc, 0x00007fb6, 0x00002191,
+       0x21e0, 0x00007fb6, 0x00002191,
+       0x3628, 0x0000003f, 0x0000000a,
+       0x362c, 0x0000003f, 0x0000000a,
+       0x2ae4, 0x00073ffe, 0x000022a2,
+       0x240c, 0x000007ff, 0x00000000,
+       0x8a14, 0xf000003f, 0x00000007,
+       0x8bf0, 0x00002001, 0x00000001,
+       0x8b24, 0xffffffff, 0x00ffffff,
+       0x30a04, 0x0000ff0f, 0x00000000,
+       0x28a4c, 0x07ffffff, 0x06000000,
+       0x4d8, 0x00000fff, 0x00000100,
+       0x3e78, 0x00000001, 0x00000002,
+       0x9100, 0x03000000, 0x0362c688,
+       0x8c00, 0x000000ff, 0x00000001,
+       0xe40, 0x00001fff, 0x00001fff,
+       0x9060, 0x0000007f, 0x00000020,
+       0x9508, 0x00010000, 0x00010000,
+       0xac14, 0x000003ff, 0x000000f3,
+       0xac0c, 0xffffffff, 0x00001032
+};
+
+static const u32 bonaire_mgcg_cgcg_init[] =
+{
+       0xc420, 0xffffffff, 0xfffffffc,
+       0x30800, 0xffffffff, 0xe0000000,
+       0x3c2a0, 0xffffffff, 0x00000100,
+       0x3c208, 0xffffffff, 0x00000100,
+       0x3c2c0, 0xffffffff, 0xc0000100,
+       0x3c2c8, 0xffffffff, 0xc0000100,
+       0x3c2c4, 0xffffffff, 0xc0000100,
+       0x55e4, 0xffffffff, 0x00600100,
+       0x3c280, 0xffffffff, 0x00000100,
+       0x3c214, 0xffffffff, 0x06000100,
+       0x3c220, 0xffffffff, 0x00000100,
+       0x3c218, 0xffffffff, 0x06000100,
+       0x3c204, 0xffffffff, 0x00000100,
+       0x3c2e0, 0xffffffff, 0x00000100,
+       0x3c224, 0xffffffff, 0x00000100,
+       0x3c200, 0xffffffff, 0x00000100,
+       0x3c230, 0xffffffff, 0x00000100,
+       0x3c234, 0xffffffff, 0x00000100,
+       0x3c250, 0xffffffff, 0x00000100,
+       0x3c254, 0xffffffff, 0x00000100,
+       0x3c258, 0xffffffff, 0x00000100,
+       0x3c25c, 0xffffffff, 0x00000100,
+       0x3c260, 0xffffffff, 0x00000100,
+       0x3c27c, 0xffffffff, 0x00000100,
+       0x3c278, 0xffffffff, 0x00000100,
+       0x3c210, 0xffffffff, 0x06000100,
+       0x3c290, 0xffffffff, 0x00000100,
+       0x3c274, 0xffffffff, 0x00000100,
+       0x3c2b4, 0xffffffff, 0x00000100,
+       0x3c2b0, 0xffffffff, 0x00000100,
+       0x3c270, 0xffffffff, 0x00000100,
+       0x30800, 0xffffffff, 0xe0000000,
+       0x3c020, 0xffffffff, 0x00010000,
+       0x3c024, 0xffffffff, 0x00030002,
+       0x3c028, 0xffffffff, 0x00040007,
+       0x3c02c, 0xffffffff, 0x00060005,
+       0x3c030, 0xffffffff, 0x00090008,
+       0x3c034, 0xffffffff, 0x00010000,
+       0x3c038, 0xffffffff, 0x00030002,
+       0x3c03c, 0xffffffff, 0x00040007,
+       0x3c040, 0xffffffff, 0x00060005,
+       0x3c044, 0xffffffff, 0x00090008,
+       0x3c048, 0xffffffff, 0x00010000,
+       0x3c04c, 0xffffffff, 0x00030002,
+       0x3c050, 0xffffffff, 0x00040007,
+       0x3c054, 0xffffffff, 0x00060005,
+       0x3c058, 0xffffffff, 0x00090008,
+       0x3c05c, 0xffffffff, 0x00010000,
+       0x3c060, 0xffffffff, 0x00030002,
+       0x3c064, 0xffffffff, 0x00040007,
+       0x3c068, 0xffffffff, 0x00060005,
+       0x3c06c, 0xffffffff, 0x00090008,
+       0x3c070, 0xffffffff, 0x00010000,
+       0x3c074, 0xffffffff, 0x00030002,
+       0x3c078, 0xffffffff, 0x00040007,
+       0x3c07c, 0xffffffff, 0x00060005,
+       0x3c080, 0xffffffff, 0x00090008,
+       0x3c084, 0xffffffff, 0x00010000,
+       0x3c088, 0xffffffff, 0x00030002,
+       0x3c08c, 0xffffffff, 0x00040007,
+       0x3c090, 0xffffffff, 0x00060005,
+       0x3c094, 0xffffffff, 0x00090008,
+       0x3c098, 0xffffffff, 0x00010000,
+       0x3c09c, 0xffffffff, 0x00030002,
+       0x3c0a0, 0xffffffff, 0x00040007,
+       0x3c0a4, 0xffffffff, 0x00060005,
+       0x3c0a8, 0xffffffff, 0x00090008,
+       0x3c000, 0xffffffff, 0x96e00200,
+       0x8708, 0xffffffff, 0x00900100,
+       0xc424, 0xffffffff, 0x0020003f,
+       0x38, 0xffffffff, 0x0140001c,
+       0x3c, 0x000f0000, 0x000f0000,
+       0x220, 0xffffffff, 0xC060000C,
+       0x224, 0xc0000fff, 0x00000100,
+       0xf90, 0xffffffff, 0x00000100,
+       0xf98, 0x00000101, 0x00000000,
+       0x20a8, 0xffffffff, 0x00000104,
+       0x55e4, 0xff000fff, 0x00000100,
+       0x30cc, 0xc0000fff, 0x00000104,
+       0xc1e4, 0x00000001, 0x00000001,
+       0xd00c, 0xff000ff0, 0x00000100,
+       0xd80c, 0xff000ff0, 0x00000100
+};
+
+static const u32 spectre_golden_spm_registers[] =
+{
+       0x30800, 0xe0ffffff, 0xe0000000
+};
+
+static const u32 spectre_golden_common_registers[] =
+{
+       0xc770, 0xffffffff, 0x00000800,
+       0xc774, 0xffffffff, 0x00000800,
+       0xc798, 0xffffffff, 0x00007fbf,
+       0xc79c, 0xffffffff, 0x00007faf
+};
+
+static const u32 spectre_golden_registers[] =
+{
+       0x3c000, 0xffff1fff, 0x96940200,
+       0x3c00c, 0xffff0001, 0xff000000,
+       0x3c200, 0xfffc0fff, 0x00000100,
+       0x6ed8, 0x00010101, 0x00010000,
+       0x9834, 0xf00fffff, 0x00000400,
+       0x9838, 0xfffffffc, 0x00020200,
+       0x5bb0, 0x000000f0, 0x00000070,
+       0x5bc0, 0xf0311fff, 0x80300000,
+       0x98f8, 0x73773777, 0x12010001,
+       0x9b7c, 0x00ff0000, 0x00fc0000,
+       0x2f48, 0x73773777, 0x12010001,
+       0x8a14, 0xf000003f, 0x00000007,
+       0x8b24, 0xffffffff, 0x00ffffff,
+       0x28350, 0x3f3f3fff, 0x00000082,
+       0x28355, 0x0000003f, 0x00000000,
+       0x3e78, 0x00000001, 0x00000002,
+       0x913c, 0xffff03df, 0x00000004,
+       0xc768, 0x00000008, 0x00000008,
+       0x8c00, 0x000008ff, 0x00000800,
+       0x9508, 0x00010000, 0x00010000,
+       0xac0c, 0xffffffff, 0x54763210,
+       0x214f8, 0x01ff01ff, 0x00000002,
+       0x21498, 0x007ff800, 0x00200000,
+       0x2015c, 0xffffffff, 0x00000f40,
+       0x30934, 0xffffffff, 0x00000001
+};
+
+static const u32 spectre_mgcg_cgcg_init[] =
+{
+       0xc420, 0xffffffff, 0xfffffffc,
+       0x30800, 0xffffffff, 0xe0000000,
+       0x3c2a0, 0xffffffff, 0x00000100,
+       0x3c208, 0xffffffff, 0x00000100,
+       0x3c2c0, 0xffffffff, 0x00000100,
+       0x3c2c8, 0xffffffff, 0x00000100,
+       0x3c2c4, 0xffffffff, 0x00000100,
+       0x55e4, 0xffffffff, 0x00600100,
+       0x3c280, 0xffffffff, 0x00000100,
+       0x3c214, 0xffffffff, 0x06000100,
+       0x3c220, 0xffffffff, 0x00000100,
+       0x3c218, 0xffffffff, 0x06000100,
+       0x3c204, 0xffffffff, 0x00000100,
+       0x3c2e0, 0xffffffff, 0x00000100,
+       0x3c224, 0xffffffff, 0x00000100,
+       0x3c200, 0xffffffff, 0x00000100,
+       0x3c230, 0xffffffff, 0x00000100,
+       0x3c234, 0xffffffff, 0x00000100,
+       0x3c250, 0xffffffff, 0x00000100,
+       0x3c254, 0xffffffff, 0x00000100,
+       0x3c258, 0xffffffff, 0x00000100,
+       0x3c25c, 0xffffffff, 0x00000100,
+       0x3c260, 0xffffffff, 0x00000100,
+       0x3c27c, 0xffffffff, 0x00000100,
+       0x3c278, 0xffffffff, 0x00000100,
+       0x3c210, 0xffffffff, 0x06000100,
+       0x3c290, 0xffffffff, 0x00000100,
+       0x3c274, 0xffffffff, 0x00000100,
+       0x3c2b4, 0xffffffff, 0x00000100,
+       0x3c2b0, 0xffffffff, 0x00000100,
+       0x3c270, 0xffffffff, 0x00000100,
+       0x30800, 0xffffffff, 0xe0000000,
+       0x3c020, 0xffffffff, 0x00010000,
+       0x3c024, 0xffffffff, 0x00030002,
+       0x3c028, 0xffffffff, 0x00040007,
+       0x3c02c, 0xffffffff, 0x00060005,
+       0x3c030, 0xffffffff, 0x00090008,
+       0x3c034, 0xffffffff, 0x00010000,
+       0x3c038, 0xffffffff, 0x00030002,
+       0x3c03c, 0xffffffff, 0x00040007,
+       0x3c040, 0xffffffff, 0x00060005,
+       0x3c044, 0xffffffff, 0x00090008,
+       0x3c048, 0xffffffff, 0x00010000,
+       0x3c04c, 0xffffffff, 0x00030002,
+       0x3c050, 0xffffffff, 0x00040007,
+       0x3c054, 0xffffffff, 0x00060005,
+       0x3c058, 0xffffffff, 0x00090008,
+       0x3c05c, 0xffffffff, 0x00010000,
+       0x3c060, 0xffffffff, 0x00030002,
+       0x3c064, 0xffffffff, 0x00040007,
+       0x3c068, 0xffffffff, 0x00060005,
+       0x3c06c, 0xffffffff, 0x00090008,
+       0x3c070, 0xffffffff, 0x00010000,
+       0x3c074, 0xffffffff, 0x00030002,
+       0x3c078, 0xffffffff, 0x00040007,
+       0x3c07c, 0xffffffff, 0x00060005,
+       0x3c080, 0xffffffff, 0x00090008,
+       0x3c084, 0xffffffff, 0x00010000,
+       0x3c088, 0xffffffff, 0x00030002,
+       0x3c08c, 0xffffffff, 0x00040007,
+       0x3c090, 0xffffffff, 0x00060005,
+       0x3c094, 0xffffffff, 0x00090008,
+       0x3c098, 0xffffffff, 0x00010000,
+       0x3c09c, 0xffffffff, 0x00030002,
+       0x3c0a0, 0xffffffff, 0x00040007,
+       0x3c0a4, 0xffffffff, 0x00060005,
+       0x3c0a8, 0xffffffff, 0x00090008,
+       0x3c0ac, 0xffffffff, 0x00010000,
+       0x3c0b0, 0xffffffff, 0x00030002,
+       0x3c0b4, 0xffffffff, 0x00040007,
+       0x3c0b8, 0xffffffff, 0x00060005,
+       0x3c0bc, 0xffffffff, 0x00090008,
+       0x3c000, 0xffffffff, 0x96e00200,
+       0x8708, 0xffffffff, 0x00900100,
+       0xc424, 0xffffffff, 0x0020003f,
+       0x38, 0xffffffff, 0x0140001c,
+       0x3c, 0x000f0000, 0x000f0000,
+       0x220, 0xffffffff, 0xC060000C,
+       0x224, 0xc0000fff, 0x00000100,
+       0xf90, 0xffffffff, 0x00000100,
+       0xf98, 0x00000101, 0x00000000,
+       0x20a8, 0xffffffff, 0x00000104,
+       0x55e4, 0xff000fff, 0x00000100,
+       0x30cc, 0xc0000fff, 0x00000104,
+       0xc1e4, 0x00000001, 0x00000001,
+       0xd00c, 0xff000ff0, 0x00000100,
+       0xd80c, 0xff000ff0, 0x00000100
+};
+
+static const u32 kalindi_golden_spm_registers[] =
+{
+       0x30800, 0xe0ffffff, 0xe0000000
+};
+
+static const u32 kalindi_golden_common_registers[] =
+{
+       0xc770, 0xffffffff, 0x00000800,
+       0xc774, 0xffffffff, 0x00000800,
+       0xc798, 0xffffffff, 0x00007fbf,
+       0xc79c, 0xffffffff, 0x00007faf
+};
+
+static const u32 kalindi_golden_registers[] =
+{
+       0x3c000, 0xffffdfff, 0x6e944040,
+       0x55e4, 0xff607fff, 0xfc000100,
+       0x3c220, 0xff000fff, 0x00000100,
+       0x3c224, 0xff000fff, 0x00000100,
+       0x3c200, 0xfffc0fff, 0x00000100,
+       0x6ed8, 0x00010101, 0x00010000,
+       0x9830, 0xffffffff, 0x00000000,
+       0x9834, 0xf00fffff, 0x00000400,
+       0x5bb0, 0x000000f0, 0x00000070,
+       0x5bc0, 0xf0311fff, 0x80300000,
+       0x98f8, 0x73773777, 0x12010001,
+       0x98fc, 0xffffffff, 0x00000010,
+       0x9b7c, 0x00ff0000, 0x00fc0000,
+       0x8030, 0x00001f0f, 0x0000100a,
+       0x2f48, 0x73773777, 0x12010001,
+       0x2408, 0x000fffff, 0x000c007f,
+       0x8a14, 0xf000003f, 0x00000007,
+       0x8b24, 0x3fff3fff, 0x00ffcfff,
+       0x30a04, 0x0000ff0f, 0x00000000,
+       0x28a4c, 0x07ffffff, 0x06000000,
+       0x4d8, 0x00000fff, 0x00000100,
+       0x3e78, 0x00000001, 0x00000002,
+       0xc768, 0x00000008, 0x00000008,
+       0x8c00, 0x000000ff, 0x00000003,
+       0x214f8, 0x01ff01ff, 0x00000002,
+       0x21498, 0x007ff800, 0x00200000,
+       0x2015c, 0xffffffff, 0x00000f40,
+       0x88c4, 0x001f3ae3, 0x00000082,
+       0x88d4, 0x0000001f, 0x00000010,
+       0x30934, 0xffffffff, 0x00000000
+};
+
+static const u32 kalindi_mgcg_cgcg_init[] =
+{
+       0xc420, 0xffffffff, 0xfffffffc,
+       0x30800, 0xffffffff, 0xe0000000,
+       0x3c2a0, 0xffffffff, 0x00000100,
+       0x3c208, 0xffffffff, 0x00000100,
+       0x3c2c0, 0xffffffff, 0x00000100,
+       0x3c2c8, 0xffffffff, 0x00000100,
+       0x3c2c4, 0xffffffff, 0x00000100,
+       0x55e4, 0xffffffff, 0x00600100,
+       0x3c280, 0xffffffff, 0x00000100,
+       0x3c214, 0xffffffff, 0x06000100,
+       0x3c220, 0xffffffff, 0x00000100,
+       0x3c218, 0xffffffff, 0x06000100,
+       0x3c204, 0xffffffff, 0x00000100,
+       0x3c2e0, 0xffffffff, 0x00000100,
+       0x3c224, 0xffffffff, 0x00000100,
+       0x3c200, 0xffffffff, 0x00000100,
+       0x3c230, 0xffffffff, 0x00000100,
+       0x3c234, 0xffffffff, 0x00000100,
+       0x3c250, 0xffffffff, 0x00000100,
+       0x3c254, 0xffffffff, 0x00000100,
+       0x3c258, 0xffffffff, 0x00000100,
+       0x3c25c, 0xffffffff, 0x00000100,
+       0x3c260, 0xffffffff, 0x00000100,
+       0x3c27c, 0xffffffff, 0x00000100,
+       0x3c278, 0xffffffff, 0x00000100,
+       0x3c210, 0xffffffff, 0x06000100,
+       0x3c290, 0xffffffff, 0x00000100,
+       0x3c274, 0xffffffff, 0x00000100,
+       0x3c2b4, 0xffffffff, 0x00000100,
+       0x3c2b0, 0xffffffff, 0x00000100,
+       0x3c270, 0xffffffff, 0x00000100,
+       0x30800, 0xffffffff, 0xe0000000,
+       0x3c020, 0xffffffff, 0x00010000,
+       0x3c024, 0xffffffff, 0x00030002,
+       0x3c028, 0xffffffff, 0x00040007,
+       0x3c02c, 0xffffffff, 0x00060005,
+       0x3c030, 0xffffffff, 0x00090008,
+       0x3c034, 0xffffffff, 0x00010000,
+       0x3c038, 0xffffffff, 0x00030002,
+       0x3c03c, 0xffffffff, 0x00040007,
+       0x3c040, 0xffffffff, 0x00060005,
+       0x3c044, 0xffffffff, 0x00090008,
+       0x3c000, 0xffffffff, 0x96e00200,
+       0x8708, 0xffffffff, 0x00900100,
+       0xc424, 0xffffffff, 0x0020003f,
+       0x38, 0xffffffff, 0x0140001c,
+       0x3c, 0x000f0000, 0x000f0000,
+       0x220, 0xffffffff, 0xC060000C,
+       0x224, 0xc0000fff, 0x00000100,
+       0x20a8, 0xffffffff, 0x00000104,
+       0x55e4, 0xff000fff, 0x00000100,
+       0x30cc, 0xc0000fff, 0x00000104,
+       0xc1e4, 0x00000001, 0x00000001,
+       0xd00c, 0xff000ff0, 0x00000100,
+       0xd80c, 0xff000ff0, 0x00000100
+};
+
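+/**
+ * cik_init_golden_registers - program per-ASIC "golden" register settings
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Applies the clockgating init and golden register tables above for the
+ * detected ASIC.  Each table entry is a (register offset, and_mask, or_mask)
+ * triplet which radeon_program_register_sequence() applies as a masked
+ * read-modify-write; an and_mask of 0xffffffff writes or_mask directly (CIK).
+ */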
+static void cik_init_golden_registers(struct radeon_device *rdev)
+{
+       switch (rdev->family) {
+       case CHIP_BONAIRE:
+               radeon_program_register_sequence(rdev,
+                                                bonaire_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init));
+               radeon_program_register_sequence(rdev,
+                                                bonaire_golden_registers,
+                                                (const u32)ARRAY_SIZE(bonaire_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                bonaire_golden_common_registers,
+                                                (const u32)ARRAY_SIZE(bonaire_golden_common_registers));
+               radeon_program_register_sequence(rdev,
+                                                bonaire_golden_spm_registers,
+                                                (const u32)ARRAY_SIZE(bonaire_golden_spm_registers));
+               break;
+       case CHIP_KABINI:
+               radeon_program_register_sequence(rdev,
+                                                kalindi_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+               radeon_program_register_sequence(rdev,
+                                                kalindi_golden_registers,
+                                                (const u32)ARRAY_SIZE(kalindi_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                kalindi_golden_common_registers,
+                                                (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
+               radeon_program_register_sequence(rdev,
+                                                kalindi_golden_spm_registers,
+                                                (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
+               break;
+       case CHIP_KAVERI:
+               radeon_program_register_sequence(rdev,
+                                                spectre_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(spectre_mgcg_cgcg_init));
+               radeon_program_register_sequence(rdev,
+                                                spectre_golden_registers,
+                                                (const u32)ARRAY_SIZE(spectre_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                spectre_golden_common_registers,
+                                                (const u32)ARRAY_SIZE(spectre_golden_common_registers));
+               radeon_program_register_sequence(rdev,
+                                                spectre_golden_spm_registers,
+                                                (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
+               break;
+       default:
+               break;
+       }
+}
+
 /**
  * cik_get_xclk - get the xclk
  *
@@ -121,6 +553,44 @@ u32 cik_get_xclk(struct radeon_device *rdev)
        return reference_clock;
 }
 
+/**
+ * cik_mm_rdoorbell - read a doorbell dword
+ *
+ * @rdev: radeon_device pointer
+ * @offset: byte offset into the aperture
+ *
+ * Returns the value in the doorbell aperture at the
+ * requested offset (CIK).
+ */
+u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
+{
+       if (offset < rdev->doorbell.size) {
+               return readl(((void __iomem *)rdev->doorbell.ptr) + offset);
+       } else {
+               DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", offset);
+               return 0;
+       }
+}
+
+/**
+ * cik_mm_wdoorbell - write a doorbell dword
+ *
+ * @rdev: radeon_device pointer
+ * @offset: byte offset into the aperture
+ * @v: value to write
+ *
+ * Writes @v to the doorbell aperture at the
+ * requested offset (CIK).
+ */
+void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v)
+{
+       if (offset < rdev->doorbell.size) {
+               writel(v, ((void __iomem *)rdev->doorbell.ptr) + offset);
+       } else {
+               DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", offset);
+       }
+}
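+/* Note: these helpers presumably back the RDOORBELL32()/WDOORBELL32()
+ * accessors in radeon.h; cik_compute_ring_set_wptr() below rings the
+ * compute queues with WDOORBELL32(ring->doorbell_offset, wptr).
+ */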
+
 #define BONAIRE_IO_MC_REGS_SIZE 36
 
 static const u32 bonaire_io_mc_regs[BONAIRE_IO_MC_REGS_SIZE][2] =
@@ -163,6 +633,29 @@ static const u32 bonaire_io_mc_regs[BONAIRE_IO_MC_REGS_SIZE][2] =
        {0x0000009f, 0x00b48000}
 };
 
+/**
+ * cik_srbm_select - select specific register instances
+ *
+ * @rdev: radeon_device pointer
+ * @me: selected ME (micro engine)
+ * @pipe: pipe
+ * @queue: queue
+ * @vmid: VMID
+ *
+ * Switches the currently active register instances.  Some
+ * registers are instanced per VMID, others are instanced per
+ * me/pipe/queue combination.
+ */
+static void cik_srbm_select(struct radeon_device *rdev,
+                           u32 me, u32 pipe, u32 queue, u32 vmid)
+{
+       u32 srbm_gfx_cntl = (PIPEID(pipe & 0x3) |
+                            MEID(me & 0x3) |
+                            VMID(vmid & 0xf) |
+                            QUEUEID(queue & 0x7));
+       WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl);
+}
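+/* Callers bracket per-instance register access with a cik_srbm_select()
+ * of the target me/pipe/queue/vmid and then cik_srbm_select(rdev, 0, 0, 0, 0)
+ * to restore the default instance; see cik_compute_ring_get_rptr() and
+ * cik_cp_compute_resume() below, and the SH_MEM setup in
+ * cik_pcie_gart_enable().
+ */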
+
 /* ucode loading */
 /**
  * ci_mc_load_microcode - load MC ucode into the hw
@@ -248,7 +741,6 @@ static int ci_mc_load_microcode(struct radeon_device *rdev)
  */
 static int cik_init_microcode(struct radeon_device *rdev)
 {
-       struct platform_device *pdev;
        const char *chip_name;
        size_t pfp_req_size, me_req_size, ce_req_size,
                mec_req_size, rlc_req_size, mc_req_size,
@@ -258,13 +750,6 @@ static int cik_init_microcode(struct radeon_device *rdev)
 
        DRM_DEBUG("\n");
 
-       pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
-       err = IS_ERR(pdev);
-       if (err) {
-               printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
-               return -EINVAL;
-       }
-
        switch (rdev->family) {
        case CHIP_BONAIRE:
                chip_name = "BONAIRE";
@@ -300,7 +785,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
        DRM_INFO("Loading %s Microcode\n", chip_name);
 
        snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
-       err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
+       err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
        if (err)
                goto out;
        if (rdev->pfp_fw->size != pfp_req_size) {
@@ -312,7 +797,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
        }
 
        snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
-       err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
+       err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
        if (err)
                goto out;
        if (rdev->me_fw->size != me_req_size) {
@@ -323,7 +808,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
        }
 
        snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
-       err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev);
+       err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
        if (err)
                goto out;
        if (rdev->ce_fw->size != ce_req_size) {
@@ -334,7 +819,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
        }
 
        snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
-       err = request_firmware(&rdev->mec_fw, fw_name, &pdev->dev);
+       err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
        if (err)
                goto out;
        if (rdev->mec_fw->size != mec_req_size) {
@@ -345,7 +830,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
        }
 
        snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
-       err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+       err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
        if (err)
                goto out;
        if (rdev->rlc_fw->size != rlc_req_size) {
@@ -356,7 +841,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
        }
 
        snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
-       err = request_firmware(&rdev->sdma_fw, fw_name, &pdev->dev);
+       err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
        if (err)
                goto out;
        if (rdev->sdma_fw->size != sdma_req_size) {
@@ -369,7 +854,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
        /* No MC ucode on APUs */
        if (!(rdev->flags & RADEON_IS_IGP)) {
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
-               err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
+               err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
                if (err)
                        goto out;
                if (rdev->mc_fw->size != mc_req_size) {
@@ -381,8 +866,6 @@ static int cik_init_microcode(struct radeon_device *rdev)
        }
 
 out:
-       platform_device_unregister(pdev);
-
        if (err) {
                if (err != -EINVAL)
                        printk(KERN_ERR
@@ -565,6 +1048,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
                                gb_tile_moden = 0;
                                break;
                        }
+                       rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
                        WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
                }
                for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
@@ -783,6 +1267,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
                                        gb_tile_moden = 0;
                                        break;
                                }
+                               rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
                                WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
                        }
                } else if (num_rbs < 4) {
@@ -908,6 +1393,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
                                        gb_tile_moden = 0;
                                        break;
                                }
+                               rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
                                WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
                        }
                }
@@ -1125,6 +1611,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
                                gb_tile_moden = 0;
                                break;
                        }
+                       rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
                        WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
                }
                for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
@@ -1240,7 +1727,7 @@ static void cik_select_se_sh(struct radeon_device *rdev,
        u32 data = INSTANCE_BROADCAST_WRITES;
 
        if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
-               data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+               data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
        else if (se_num == 0xffffffff)
                data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
        else if (sh_num == 0xffffffff)
@@ -1626,6 +2113,7 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
        radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
        radeon_ring_write(ring, 0xDEADBEEF);
        radeon_ring_unlock_commit(rdev, ring);
+
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
@@ -1644,7 +2132,7 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 }
 
 /**
- * cik_fence_ring_emit - emit a fence on the gfx ring
+ * cik_fence_gfx_ring_emit - emit a fence on the gfx ring
  *
  * @rdev: radeon_device pointer
  * @fence: radeon fence object
@@ -1652,8 +2140,8 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
  * Emits a fence sequence number on the gfx ring and flushes
  * GPU caches.
  */
-void cik_fence_ring_emit(struct radeon_device *rdev,
-                        struct radeon_fence *fence)
+void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
+                            struct radeon_fence *fence)
 {
        struct radeon_ring *ring = &rdev->ring[fence->ring];
        u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
@@ -1680,6 +2168,44 @@ void cik_fence_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(ring, 0);
 }
 
+/**
+ * cik_fence_compute_ring_emit - emit a fence on the compute ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Emits a fence sequence number on the compute ring and flushes
+ * GPU caches.
+ */
+void cik_fence_compute_ring_emit(struct radeon_device *rdev,
+                                struct radeon_fence *fence)
+{
+       struct radeon_ring *ring = &rdev->ring[fence->ring];
+       u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+       /* RELEASE_MEM - flush caches, send int */
+       radeon_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
+       radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
+                                EOP_TC_ACTION_EN |
+                                EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+                                EVENT_INDEX(5)));
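+       /* DATA_SEL(1) selects a 32-bit data write (the fence seq) and
+        * INT_SEL(2) raises the interrupt once the write is confirmed;
+        * as far as I can tell this matches the gfx EOP fence selects.
+        */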
+       radeon_ring_write(ring, DATA_SEL(1) | INT_SEL(2));
+       radeon_ring_write(ring, addr & 0xfffffffc);
+       radeon_ring_write(ring, upper_32_bits(addr));
+       radeon_ring_write(ring, fence->seq);
+       radeon_ring_write(ring, 0);
+       /* HDP flush */
+       /* We should be using the new WAIT_REG_MEM special op packet here
+        * but it causes the CP to hang
+        */
+       radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+                                WRITE_DATA_DST_SEL(0)));
+       radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+}
+
 void cik_semaphore_ring_emit(struct radeon_device *rdev,
                             struct radeon_ring *ring,
                             struct radeon_semaphore *semaphore,
@@ -2051,6 +2577,51 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
        return 0;
 }
 
+u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
+                             struct radeon_ring *ring)
+{
+       u32 rptr;
+
+       if (rdev->wb.enabled) {
+               rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+       } else {
+               cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
+               rptr = RREG32(CP_HQD_PQ_RPTR);
+               cik_srbm_select(rdev, 0, 0, 0, 0);
+       }
+       rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
+
+       return rptr;
+}
+
+u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
+                             struct radeon_ring *ring)
+{
+       u32 wptr;
+
+       if (rdev->wb.enabled) {
+               wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]);
+       } else {
+               cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
+               wptr = RREG32(CP_HQD_PQ_WPTR);
+               cik_srbm_select(rdev, 0, 0, 0, 0);
+       }
+       wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
+
+       return wptr;
+}
+
+void cik_compute_ring_set_wptr(struct radeon_device *rdev,
+                              struct radeon_ring *ring)
+{
+       u32 wptr = (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask;
+
+       rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(wptr);
+       WDOORBELL32(ring->doorbell_offset, wptr);
+}
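+/* With writeback enabled the compute rptr/wptr are read from the WB buffer;
+ * otherwise they come from the HQD registers behind cik_srbm_select().
+ * Write-pointer updates are always mirrored to WB and posted through the
+ * queue's doorbell.
+ */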
+
 /**
  * cik_cp_compute_enable - enable/disable the compute CP MEs
  *
@@ -2115,7 +2686,8 @@ static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
  */
 static int cik_cp_compute_start(struct radeon_device *rdev)
 {
-       //todo
+       cik_cp_compute_enable(rdev, true);
+
        return 0;
 }
 
@@ -2129,10 +2701,171 @@ static int cik_cp_compute_start(struct radeon_device *rdev)
  */
 static void cik_cp_compute_fini(struct radeon_device *rdev)
 {
+       int i, idx, r;
+
        cik_cp_compute_enable(rdev, false);
-       //todo
+
+       for (i = 0; i < 2; i++) {
+               if (i == 0)
+                       idx = CAYMAN_RING_TYPE_CP1_INDEX;
+               else
+                       idx = CAYMAN_RING_TYPE_CP2_INDEX;
+
+               if (rdev->ring[idx].mqd_obj) {
+                       r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
+                       if (unlikely(r != 0))
+                               dev_warn(rdev->dev, "(%d) reserve MQD bo failed\n", r);
+
+                       radeon_bo_unpin(rdev->ring[idx].mqd_obj);
+                       radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
+
+                       radeon_bo_unref(&rdev->ring[idx].mqd_obj);
+                       rdev->ring[idx].mqd_obj = NULL;
+               }
+       }
+}
+
+static void cik_mec_fini(struct radeon_device *rdev)
+{
+       int r;
+
+       if (rdev->mec.hpd_eop_obj) {
+               r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
+               if (unlikely(r != 0))
+                       dev_warn(rdev->dev, "(%d) reserve HPD EOP bo failed\n", r);
+               radeon_bo_unpin(rdev->mec.hpd_eop_obj);
+               radeon_bo_unreserve(rdev->mec.hpd_eop_obj);
+
+               radeon_bo_unref(&rdev->mec.hpd_eop_obj);
+               rdev->mec.hpd_eop_obj = NULL;
+       }
+}
+
+#define MEC_HPD_SIZE 2048
+
+static int cik_mec_init(struct radeon_device *rdev)
+{
+       int r;
+       u32 *hpd;
+
+       /*
+        * KV:    2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
+        * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
+        */
+       if (rdev->family == CHIP_KAVERI)
+               rdev->mec.num_mec = 2;
+       else
+               rdev->mec.num_mec = 1;
+       rdev->mec.num_pipe = 4;
+       rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8;
+
+       if (rdev->mec.hpd_eop_obj == NULL) {
+               r = radeon_bo_create(rdev,
+                                    rdev->mec.num_mec * rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
+                                    PAGE_SIZE, true,
+                                    RADEON_GEM_DOMAIN_GTT, NULL,
+                                    &rdev->mec.hpd_eop_obj);
+               if (r) {
+                       dev_warn(rdev->dev, "(%d) create HPD EOP bo failed\n", r);
+                       return r;
+               }
+       }
+
+       r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
+       if (unlikely(r != 0)) {
+               cik_mec_fini(rdev);
+               return r;
+       }
+       r = radeon_bo_pin(rdev->mec.hpd_eop_obj, RADEON_GEM_DOMAIN_GTT,
+                         &rdev->mec.hpd_eop_gpu_addr);
+       if (r) {
+               dev_warn(rdev->dev, "(%d) pin HPD EOP bo failed\n", r);
+               cik_mec_fini(rdev);
+               return r;
+       }
+       r = radeon_bo_kmap(rdev->mec.hpd_eop_obj, (void **)&hpd);
+       if (r) {
+               dev_warn(rdev->dev, "(%d) map HPD EOP bo failed\n", r);
+               cik_mec_fini(rdev);
+               return r;
+       }
+
+       /* clear memory.  Not sure if this is required or not */
+       memset(hpd, 0, rdev->mec.num_mec * rdev->mec.num_pipe * MEC_HPD_SIZE * 2);
+
+       radeon_bo_kunmap(rdev->mec.hpd_eop_obj);
+       radeon_bo_unreserve(rdev->mec.hpd_eop_obj);
+
+       return 0;
 }
 
+struct hqd_registers
+{
+       u32 cp_mqd_base_addr;
+       u32 cp_mqd_base_addr_hi;
+       u32 cp_hqd_active;
+       u32 cp_hqd_vmid;
+       u32 cp_hqd_persistent_state;
+       u32 cp_hqd_pipe_priority;
+       u32 cp_hqd_queue_priority;
+       u32 cp_hqd_quantum;
+       u32 cp_hqd_pq_base;
+       u32 cp_hqd_pq_base_hi;
+       u32 cp_hqd_pq_rptr;
+       u32 cp_hqd_pq_rptr_report_addr;
+       u32 cp_hqd_pq_rptr_report_addr_hi;
+       u32 cp_hqd_pq_wptr_poll_addr;
+       u32 cp_hqd_pq_wptr_poll_addr_hi;
+       u32 cp_hqd_pq_doorbell_control;
+       u32 cp_hqd_pq_wptr;
+       u32 cp_hqd_pq_control;
+       u32 cp_hqd_ib_base_addr;
+       u32 cp_hqd_ib_base_addr_hi;
+       u32 cp_hqd_ib_rptr;
+       u32 cp_hqd_ib_control;
+       u32 cp_hqd_iq_timer;
+       u32 cp_hqd_iq_rptr;
+       u32 cp_hqd_dequeue_request;
+       u32 cp_hqd_dma_offload;
+       u32 cp_hqd_sema_cmd;
+       u32 cp_hqd_msg_type;
+       u32 cp_hqd_atomic0_preop_lo;
+       u32 cp_hqd_atomic0_preop_hi;
+       u32 cp_hqd_atomic1_preop_lo;
+       u32 cp_hqd_atomic1_preop_hi;
+       u32 cp_hqd_hq_scheduler0;
+       u32 cp_hqd_hq_scheduler1;
+       u32 cp_mqd_control;
+};
+
+struct bonaire_mqd
+{
+       u32 header;
+       u32 dispatch_initiator;
+       u32 dimensions[3];
+       u32 start_idx[3];
+       u32 num_threads[3];
+       u32 pipeline_stat_enable;
+       u32 perf_counter_enable;
+       u32 pgm[2];
+       u32 tba[2];
+       u32 tma[2];
+       u32 pgm_rsrc[2];
+       u32 vmid;
+       u32 resource_limits;
+       u32 static_thread_mgmt01[2];
+       u32 tmp_ring_size;
+       u32 static_thread_mgmt23[2];
+       u32 restart[3];
+       u32 thread_trace_enable;
+       u32 reserved1;
+       u32 user_data[16];
+       u32 vgtcs_invoke_count[2];
+       struct hqd_registers queue_state;
+       u32 dequeue_cntr;
+       u32 interrupt_queue[64];
+};
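+/* bonaire_mqd is the memory queue descriptor: the per-queue state block the
+ * compute CP loads when a hardware queue is mapped.  queue_state mirrors the
+ * CP_MQD_ / CP_HQD_ registers programmed in cik_cp_compute_resume() below;
+ * the exact layout is, as I understand it, defined by the MEC firmware.
+ */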
+
 /**
  * cik_cp_compute_resume - setup the compute queue registers
  *
@@ -2144,24 +2877,247 @@ static void cik_cp_compute_fini(struct radeon_device *rdev)
  */
 static int cik_cp_compute_resume(struct radeon_device *rdev)
 {
-       int r;
+       int r, i, idx;
+       u32 tmp;
+       bool use_doorbell = true;
+       u64 hqd_gpu_addr;
+       u64 mqd_gpu_addr;
+       u64 eop_gpu_addr;
+       u64 wb_gpu_addr;
+       u32 *buf;
+       struct bonaire_mqd *mqd;
 
-       //todo
        r = cik_cp_compute_start(rdev);
        if (r)
                return r;
+
+       /* fix up chicken bits */
+       tmp = RREG32(CP_CPF_DEBUG);
+       tmp |= (1 << 23);
+       WREG32(CP_CPF_DEBUG, tmp);
+
+       /* init the pipes */
+       for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) {
+               int me = (i < 4) ? 1 : 2;
+               int pipe = (i < 4) ? i : (i - 4);
+
+               eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);
+
+               cik_srbm_select(rdev, me, pipe, 0, 0);
+
+               /* write the EOP addr */
+               WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
+               WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
+
+               /* set the VMID assigned */
+               WREG32(CP_HPD_EOP_VMID, 0);
+
+               /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+               tmp = RREG32(CP_HPD_EOP_CONTROL);
+               tmp &= ~EOP_SIZE_MASK;
+               tmp |= drm_order(MEC_HPD_SIZE / 8);
+               WREG32(CP_HPD_EOP_CONTROL, tmp);
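+               /* e.g. MEC_HPD_SIZE = 2048 bytes = 512 dwords;
+                * drm_order(2048 / 8) = 8 and 2^(8 + 1) = 512 dwords.
+                */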
+       }
+       cik_srbm_select(rdev, 0, 0, 0, 0);
+
+       /* init the queues.  Just two for now. */
+       for (i = 0; i < 2; i++) {
+               if (i == 0)
+                       idx = CAYMAN_RING_TYPE_CP1_INDEX;
+               else
+                       idx = CAYMAN_RING_TYPE_CP2_INDEX;
+
+               if (rdev->ring[idx].mqd_obj == NULL) {
+                       r = radeon_bo_create(rdev,
+                                            sizeof(struct bonaire_mqd),
+                                            PAGE_SIZE, true,
+                                            RADEON_GEM_DOMAIN_GTT, NULL,
+                                            &rdev->ring[idx].mqd_obj);
+                       if (r) {
+                               dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
+                               return r;
+                       }
+               }
+
+               r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
+               if (unlikely(r != 0)) {
+                       cik_cp_compute_fini(rdev);
+                       return r;
+               }
+               r = radeon_bo_pin(rdev->ring[idx].mqd_obj, RADEON_GEM_DOMAIN_GTT,
+                                 &mqd_gpu_addr);
+               if (r) {
+                       dev_warn(rdev->dev, "(%d) pin MQD bo failed\n", r);
+                       cik_cp_compute_fini(rdev);
+                       return r;
+               }
+               r = radeon_bo_kmap(rdev->ring[idx].mqd_obj, (void **)&buf);
+               if (r) {
+                       dev_warn(rdev->dev, "(%d) map MQD bo failed\n", r);
+                       cik_cp_compute_fini(rdev);
+                       return r;
+               }
+
+               /* doorbell offset */
+               rdev->ring[idx].doorbell_offset =
+                       (rdev->ring[idx].doorbell_page_num * PAGE_SIZE) + 0;
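+               /* byte offset into the doorbell aperture: one page per ring,
+                * queue slot 0 within the page.  The HQD consumes it in
+                * dwords (doorbell_offset / 4) via DOORBELL_OFFSET() below.
+                */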
+
+               /* init the mqd struct */
+               memset(buf, 0, sizeof(struct bonaire_mqd));
+
+               mqd = (struct bonaire_mqd *)buf;
+               mqd->header = 0xC0310800;
+               mqd->static_thread_mgmt01[0] = 0xffffffff;
+               mqd->static_thread_mgmt01[1] = 0xffffffff;
+               mqd->static_thread_mgmt23[0] = 0xffffffff;
+               mqd->static_thread_mgmt23[1] = 0xffffffff;
+
+               cik_srbm_select(rdev, rdev->ring[idx].me,
+                               rdev->ring[idx].pipe,
+                               rdev->ring[idx].queue, 0);
+
+               /* disable wptr polling */
+               tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
+               tmp &= ~WPTR_POLL_EN;
+               WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
+
+               /* enable doorbell? */
+               mqd->queue_state.cp_hqd_pq_doorbell_control =
+                       RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
+               if (use_doorbell)
+                       mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
+               else
+                       mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_EN;
+               WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
+                      mqd->queue_state.cp_hqd_pq_doorbell_control);
+
+               /* disable the queue if it's active */
+               mqd->queue_state.cp_hqd_dequeue_request = 0;
+               mqd->queue_state.cp_hqd_pq_rptr = 0;
+               mqd->queue_state.cp_hqd_pq_wptr = 0;
+               if (RREG32(CP_HQD_ACTIVE) & 1) {
+                       WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
+                       for (i = 0; i < rdev->usec_timeout; i++) {
+                               if (!(RREG32(CP_HQD_ACTIVE) & 1))
+                                       break;
+                               udelay(1);
+                       }
+                       WREG32(CP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
+                       WREG32(CP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
+                       WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
+               }
+
+               /* set the pointer to the MQD */
+               mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
+               mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
+               WREG32(CP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
+               WREG32(CP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
+               /* set MQD vmid to 0 */
+               mqd->queue_state.cp_mqd_control = RREG32(CP_MQD_CONTROL);
+               mqd->queue_state.cp_mqd_control &= ~MQD_VMID_MASK;
+               WREG32(CP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);
+
+               /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
+               hqd_gpu_addr = rdev->ring[idx].gpu_addr >> 8;
+               mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
+               mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
+               WREG32(CP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
+               WREG32(CP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);
+
+               /* set up the HQD, this is similar to CP_RB0_CNTL */
+               mqd->queue_state.cp_hqd_pq_control = RREG32(CP_HQD_PQ_CONTROL);
+               mqd->queue_state.cp_hqd_pq_control &=
+                       ~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
+
+               mqd->queue_state.cp_hqd_pq_control |=
+                       drm_order(rdev->ring[idx].ring_size / 8);
+               mqd->queue_state.cp_hqd_pq_control |=
+                       (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8);
+#ifdef __BIG_ENDIAN
+               mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
+#endif
+               mqd->queue_state.cp_hqd_pq_control &=
+                       ~(UNORD_DISPATCH | ROQ_PQ_IB_FLIP | PQ_VOLATILE);
+               mqd->queue_state.cp_hqd_pq_control |=
+                       PRIV_STATE | KMD_QUEUE; /* assuming kernel queue control */
+               WREG32(CP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);
+
+               /* only used if CP_PQ_WPTR_POLL_CNTL.WPTR_POLL_EN=1 */
+               if (i == 0)
+                       wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP1_WPTR_OFFSET;
+               else
+                       wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP2_WPTR_OFFSET;
+               mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
+               mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
+               WREG32(CP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
+               WREG32(CP_HQD_PQ_WPTR_POLL_ADDR_HI,
+                      mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);
+
+               /* set the wb address whether it's enabled or not */
+               if (i == 0)
+                       wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET;
+               else
+                       wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET;
+               mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
+               mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
+                       upper_32_bits(wb_gpu_addr) & 0xffff;
+               WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR,
+                      mqd->queue_state.cp_hqd_pq_rptr_report_addr);
+               WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+                      mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);
+
+               /* enable the doorbell if requested */
+               if (use_doorbell) {
+                       mqd->queue_state.cp_hqd_pq_doorbell_control =
+                               RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
+                       mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK;
+                       mqd->queue_state.cp_hqd_pq_doorbell_control |=
+                               DOORBELL_OFFSET(rdev->ring[idx].doorbell_offset / 4);
+                       mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
+                       mqd->queue_state.cp_hqd_pq_doorbell_control &=
+                               ~(DOORBELL_SOURCE | DOORBELL_HIT);
+
+               } else {
+                       mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
+               }
+               WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
+                      mqd->queue_state.cp_hqd_pq_doorbell_control);
+
+               /* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
+               rdev->ring[idx].wptr = 0;
+               mqd->queue_state.cp_hqd_pq_wptr = rdev->ring[idx].wptr;
+               WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
+               rdev->ring[idx].rptr = RREG32(CP_HQD_PQ_RPTR);
+               mqd->queue_state.cp_hqd_pq_rptr = rdev->ring[idx].rptr;
+
+               /* set the vmid for the queue */
+               mqd->queue_state.cp_hqd_vmid = 0;
+               WREG32(CP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);
+
+               /* activate the queue */
+               mqd->queue_state.cp_hqd_active = 1;
+               WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
+
+               cik_srbm_select(rdev, 0, 0, 0, 0);
+
+               radeon_bo_kunmap(rdev->ring[idx].mqd_obj);
+               radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
+
+               rdev->ring[idx].ready = true;
+               r = radeon_ring_test(rdev, idx, &rdev->ring[idx]);
+               if (r)
+                       rdev->ring[idx].ready = false;
+       }
+
        return 0;
 }
 
-/* XXX temporary wrappers to handle both compute and gfx */
-/* XXX */
 static void cik_cp_enable(struct radeon_device *rdev, bool enable)
 {
        cik_cp_gfx_enable(rdev, enable);
        cik_cp_compute_enable(rdev, enable);
 }
 
-/* XXX */
 static int cik_cp_load_microcode(struct radeon_device *rdev)
 {
        int r;
@@ -2176,14 +3132,12 @@ static int cik_cp_load_microcode(struct radeon_device *rdev)
        return 0;
 }
 
-/* XXX */
 static void cik_cp_fini(struct radeon_device *rdev)
 {
        cik_cp_gfx_fini(rdev);
        cik_cp_compute_fini(rdev);
 }
 
-/* XXX */
 static int cik_cp_resume(struct radeon_device *rdev)
 {
        int r;
@@ -2804,6 +3758,22 @@ static void cik_print_gpu_status_regs(struct radeon_device *rdev)
                RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
        dev_info(rdev->dev, "  SDMA1_STATUS_REG   = 0x%08X\n",
                 RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
+       dev_info(rdev->dev, "  CP_STAT = 0x%08x\n", RREG32(CP_STAT));
+       dev_info(rdev->dev, "  CP_STALLED_STAT1 = 0x%08x\n",
+                RREG32(CP_STALLED_STAT1));
+       dev_info(rdev->dev, "  CP_STALLED_STAT2 = 0x%08x\n",
+                RREG32(CP_STALLED_STAT2));
+       dev_info(rdev->dev, "  CP_STALLED_STAT3 = 0x%08x\n",
+                RREG32(CP_STALLED_STAT3));
+       dev_info(rdev->dev, "  CP_CPF_BUSY_STAT = 0x%08x\n",
+                RREG32(CP_CPF_BUSY_STAT));
+       dev_info(rdev->dev, "  CP_CPF_STALLED_STAT1 = 0x%08x\n",
+                RREG32(CP_CPF_STALLED_STAT1));
+       dev_info(rdev->dev, "  CP_CPF_STATUS = 0x%08x\n", RREG32(CP_CPF_STATUS));
+       dev_info(rdev->dev, "  CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(CP_CPC_BUSY_STAT));
+       dev_info(rdev->dev, "  CP_CPC_STALLED_STAT1 = 0x%08x\n",
+                RREG32(CP_CPC_STALLED_STAT1));
+       dev_info(rdev->dev, "  CP_CPC_STATUS = 0x%08x\n", RREG32(CP_CPC_STATUS));
 }
 
 /**
@@ -3351,7 +4321,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
        /* XXX SH_MEM regs */
        /* where to put LDS, scratch, GPUVM in FSA64 space */
        for (i = 0; i < 16; i++) {
-               WREG32(SRBM_GFX_CNTL, VMID(i));
+               cik_srbm_select(rdev, 0, 0, 0, i);
                /* CP and shaders */
                WREG32(SH_MEM_CONFIG, 0);
                WREG32(SH_MEM_APE1_BASE, 1);
@@ -3364,7 +4334,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
                WREG32(SDMA0_GFX_APE1_CNTL + SDMA1_REGISTER_OFFSET, 0);
                /* XXX SDMA RLC - todo */
        }
-       WREG32(SRBM_GFX_CNTL, 0);
+       cik_srbm_select(rdev, 0, 0, 0, 0);
 
        cik_pcie_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -3471,6 +4441,29 @@ void cik_vm_fini(struct radeon_device *rdev)
 {
 }
 
+/**
+ * cik_vm_decode_fault - print human readable fault info
+ *
+ * @rdev: radeon_device pointer
+ * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
+ * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ *
+ * Print human readable fault information (CIK).
+ */
+static void cik_vm_decode_fault(struct radeon_device *rdev,
+                               u32 status, u32 addr, u32 mc_client)
+{
+       u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+       u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
+       u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
+       char *block = (char *)&mc_client;
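+       /* mc_client is presumably a packed ASCII tag naming the offending
+        * block, hence the %s print of its bytes below.
+        */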
+
+       printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
+              protections, vmid, addr,
+              (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
+              block, mc_id);
+}
+
 /**
  * cik_vm_flush - cik vm flush using the CP
  *
@@ -3545,9 +4538,12 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 1 << vm->id);
 
-       /* sync PFP to ME, otherwise we might get invalid PFP reads */
-       radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
-       radeon_ring_write(ring, 0x0);
+       /* compute doesn't have PFP */
+       if (ridx == RADEON_RING_TYPE_GFX_INDEX) {
+               /* sync PFP to ME, otherwise we might get invalid PFP reads */
+               radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+               radeon_ring_write(ring, 0x0);
+       }
 }
 
 /**
@@ -4084,6 +5080,8 @@ int cik_irq_set(struct radeon_device *rdev)
 {
        u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE |
                PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
+       u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
+       u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
        u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
        u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
        u32 grbm_int_cntl = 0;
@@ -4111,13 +5109,106 @@ int cik_irq_set(struct radeon_device *rdev)
        dma_cntl = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
        dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
 
+       cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+       cp_m1p1 = RREG32(CP_ME1_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+       cp_m1p2 = RREG32(CP_ME1_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+       cp_m1p3 = RREG32(CP_ME1_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+       cp_m2p0 = RREG32(CP_ME2_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+       cp_m2p1 = RREG32(CP_ME2_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+       cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+       cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
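+       /* The per-ME, per-pipe EOP timestamp interrupt is re-enabled below
+        * only for the pipe each compute ring landed on, then the registers
+        * are written back along with the other interrupt controls.
+        */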
+
        /* enable CP interrupts on all rings */
        if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
                DRM_DEBUG("cik_irq_set: sw int gfx\n");
                cp_int_cntl |= TIME_STAMP_INT_ENABLE;
        }
-       /* TODO: compute queues! */
-       /* CP_ME[1-2]_PIPE[0-3]_INT_CNTL */
+       if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
+               struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+               DRM_DEBUG("cik_irq_set: sw int cp1\n");
+               if (ring->me == 1) {
+                       switch (ring->pipe) {
+                       case 0:
+                               cp_m1p0 |= TIME_STAMP_INT_ENABLE;
+                               break;
+                       case 1:
+                               cp_m1p1 |= TIME_STAMP_INT_ENABLE;
+                               break;
+                       case 2:
+                               cp_m1p2 |= TIME_STAMP_INT_ENABLE;
+                               break;
+                       case 3:
+                               cp_m1p3 |= TIME_STAMP_INT_ENABLE;
+                               break;
+                       default:
+                               DRM_DEBUG("cik_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
+                               break;
+                       }
+               } else if (ring->me == 2) {
+                       switch (ring->pipe) {
+                       case 0:
+                               cp_m2p0 |= TIME_STAMP_INT_ENABLE;
+                               break;
+                       case 1:
+                               cp_m2p1 |= TIME_STAMP_INT_ENABLE;
+                               break;
+                       case 2:
+                               cp_m2p2 |= TIME_STAMP_INT_ENABLE;
+                               break;
+                       case 3:
+                               cp_m2p3 |= TIME_STAMP_INT_ENABLE;
+                               break;
+                       default:
+                               DRM_DEBUG("cik_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
+                               break;
+                       }
+               } else {
+                       DRM_DEBUG("cik_irq_set: sw int cp1 invalid me %d\n", ring->me);
+               }
+       }
+       if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
+               struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+               DRM_DEBUG("cik_irq_set: sw int cp2\n");
+               if (ring->me == 1) {
+                       switch (ring->pipe) {
+                       case 0:
+                               cp_m1p0 |= TIME_STAMP_INT_ENABLE;
+                               break;
+                       case 1:
+                               cp_m1p1 |= TIME_STAMP_INT_ENABLE;
+                               break;
+                       case 2:
+                               cp_m1p2 |= TIME_STAMP_INT_ENABLE;
+                               break;
+                       case 3:
+                               cp_m1p3 |= TIME_STAMP_INT_ENABLE;
+                               break;
+                       default:
+                               DRM_DEBUG("cik_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
+                               break;
+                       }
+               } else if (ring->me == 2) {
+                       switch (ring->pipe) {
+                       case 0:
+                               cp_m2p0 |= TIME_STAMP_INT_ENABLE;
+                               break;
+                       case 1:
+                               cp_m2p1 |= TIME_STAMP_INT_ENABLE;
+                               break;
+                       case 2:
+                               cp_m2p2 |= TIME_STAMP_INT_ENABLE;
+                               break;
+                       case 3:
+                               cp_m2p3 |= TIME_STAMP_INT_ENABLE;
+                               break;
+                       default:
+                               DRM_DEBUG("cik_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
+                               break;
+                       }
+               } else {
+                       DRM_DEBUG("cik_irq_set: sw int cp2 invalid me %d\n", ring->me);
+               }
+       }
 
        if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
                DRM_DEBUG("cik_irq_set: sw int dma\n");
@@ -4189,6 +5280,15 @@ int cik_irq_set(struct radeon_device *rdev)
        WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
        WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1);
 
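+       /* write back the per-pipe compute interrupt masks assembled above */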
+       WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0);
+       WREG32(CP_ME1_PIPE1_INT_CNTL, cp_m1p1);
+       WREG32(CP_ME1_PIPE2_INT_CNTL, cp_m1p2);
+       WREG32(CP_ME1_PIPE3_INT_CNTL, cp_m1p3);
+       WREG32(CP_ME2_PIPE0_INT_CNTL, cp_m2p0);
+       WREG32(CP_ME2_PIPE1_INT_CNTL, cp_m2p1);
+       WREG32(CP_ME2_PIPE2_INT_CNTL, cp_m2p2);
+       WREG32(CP_ME2_PIPE3_INT_CNTL, cp_m2p3);
+
        WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
        WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -4410,6 +5510,8 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
  */
 int cik_irq_process(struct radeon_device *rdev)
 {
+       struct radeon_ring *cp1_ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+       struct radeon_ring *cp2_ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
        u32 wptr;
        u32 rptr;
        u32 src_id, src_data, ring_id;
@@ -4417,6 +5519,7 @@ int cik_irq_process(struct radeon_device *rdev)
        u32 ring_index;
        bool queue_hotplug = false;
        bool queue_reset = false;
+       u32 addr, status, mc_client;
 
        if (!rdev->ih.enabled || rdev->shutdown)
                return IRQ_NONE;
@@ -4652,11 +5755,15 @@ restart_ih:
                        break;
                case 146:
                case 147:
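+                       /* latch the fault information before it is cleared below */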
+                       addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
+                       status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+                       mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
                        dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
                        dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
-                               RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+                               addr);
                        dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
-                               RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+                               status);
+                       cik_vm_decode_fault(rdev, status, addr, mc_client);
                        /* reset addr and status */
                        WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
                        break;
@@ -4675,10 +5782,11 @@ restart_ih:
                                radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                                break;
                        case 1:
-                               /* XXX compute */
-                               break;
                        case 2:
-                               /* XXX compute */
+                               if ((cp1_ring->me == me_id) && (cp1_ring->pipe == pipe_id))
+                                       radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+                               if ((cp2_ring->me == me_id) && (cp2_ring->pipe == pipe_id))
+                                       radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
                                break;
                        }
                        break;
@@ -4697,9 +5805,11 @@ restart_ih:
                                break;
                        case 1:
                                /* XXX compute */
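+                               /* compute queue error: request a GPU reset */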
+                               queue_reset = true;
                                break;
                        case 2:
                                /* XXX compute */
+                               queue_reset = true;
                                break;
                        }
                        break;
@@ -4718,9 +5828,11 @@ restart_ih:
                                break;
                        case 1:
                                /* XXX compute */
+                               queue_reset = true;
                                break;
                        case 2:
                                /* XXX compute */
+                               queue_reset = true;
                                break;
                        }
                        break;
@@ -4891,12 +6003,31 @@ static int cik_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       /* allocate mec buffers */
+       r = cik_mec_init(rdev);
+       if (r) {
+               DRM_ERROR("Failed to init MEC BOs!\n");
+               return r;
+       }
+
        r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
                return r;
        }
 
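+       /* start fence handling on the two new compute rings */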
+       r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
+       r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
        if (r) {
                dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
@@ -4941,6 +6072,32 @@ static int cik_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       /* set up the compute queues */
+       /* type-2 packets are deprecated on MEC, use type-3 instead */
+       ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+       r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
+                            CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
+                            0, 0xfffff, PACKET3(PACKET3_NOP, 0x3FFF));
+       if (r)
+               return r;
+       ring->me = 1; /* first MEC */
+       ring->pipe = 0; /* first pipe */
+       ring->queue = 0; /* first queue */
+       ring->wptr_offs = CIK_WB_CP1_WPTR_OFFSET;
+
+       /* type-2 packets are deprecated on MEC, use type-3 instead */
+       ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+       r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
+                            CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
+                            0, 0xffffffff, PACKET3(PACKET3_NOP, 0x3FFF));
+       if (r)
+               return r;
+       /* dGPUs only have 1 MEC */
+       ring->me = 1; /* first MEC */
+       ring->pipe = 0; /* first pipe */
+       ring->queue = 1; /* second queue */
+       ring->wptr_offs = CIK_WB_CP2_WPTR_OFFSET;
+
        ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
                             SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET,
@@ -5008,6 +6165,9 @@ int cik_resume(struct radeon_device *rdev)
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
 
+       /* init golden registers */
+       cik_init_golden_registers(rdev);
+
        rdev->accel_working = true;
        r = cik_startup(rdev);
        if (r) {
@@ -5034,7 +6194,7 @@ int cik_suspend(struct radeon_device *rdev)
        radeon_vm_manager_fini(rdev);
        cik_cp_enable(rdev, false);
        cik_sdma_enable(rdev, false);
-       r600_uvd_rbc_stop(rdev);
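+       /* stop the whole UVD block on suspend, not just its ring buffer controller */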
+       r600_uvd_stop(rdev);
        radeon_uvd_suspend(rdev);
        cik_irq_suspend(rdev);
        radeon_wb_disable(rdev);
@@ -5086,6 +6246,8 @@ int cik_init(struct radeon_device *rdev)
                DRM_INFO("GPU not posted. posting now...\n");
                atom_asic_init(rdev->mode_info.atom_context);
        }
+       /* init golden registers */
+       cik_init_golden_registers(rdev);
        /* Initialize scratch registers */
        cik_scratch_init(rdev);
        /* Initialize surface registers */
@@ -5111,6 +6273,20 @@ int cik_init(struct radeon_device *rdev)
        ring->ring_obj = NULL;
        r600_ring_init(rdev, ring, 1024 * 1024);
 
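+       /* each compute ring reserves its own doorbell page */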
+       ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+       ring->ring_obj = NULL;
+       r600_ring_init(rdev, ring, 1024 * 1024);
+       r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
+       if (r)
+               return r;
+
+       ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+       ring->ring_obj = NULL;
+       r600_ring_init(rdev, ring, 1024 * 1024);
+       r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
+       if (r)
+               return r;
+
        ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
        ring->ring_obj = NULL;
        r600_ring_init(rdev, ring, 256 * 1024);
@@ -5141,6 +6317,7 @@ int cik_init(struct radeon_device *rdev)
                cik_sdma_fini(rdev);
                cik_irq_fini(rdev);
                si_rlc_fini(rdev);
+               cik_mec_fini(rdev);
                radeon_wb_fini(rdev);
                radeon_ib_pool_fini(rdev);
                radeon_vm_manager_fini(rdev);
@@ -5176,10 +6353,12 @@ void cik_fini(struct radeon_device *rdev)
        cik_sdma_fini(rdev);
        cik_irq_fini(rdev);
        si_rlc_fini(rdev);
+       cik_mec_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_vm_manager_fini(rdev);
        radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
+       r600_uvd_stop(rdev);
        radeon_uvd_fini(rdev);
        cik_pcie_gart_fini(rdev);
        r600_vram_scratch_fini(rdev);
@@ -5800,7 +6979,7 @@ int cik_uvd_resume(struct radeon_device *rdev)
 
        /* program the VCPU memory controller bits 0-27 */
        addr = rdev->uvd.gpu_addr >> 3;
-       size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
+       size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3;
        WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
        WREG32(UVD_VCPU_CACHE_SIZE0, size);