2 * drivers/video/tegra/host/gk20a/pmu_gk20a.h
4 * GK20A PMU (aka. gPMU outside gk20a context)
6 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 #ifndef __PMU_GK20A_H__
22 #define __PMU_GK20A_H__
24 /* defined by pmu hw spec */
25 #define GK20A_PMU_VA_START ((128 * 1024) << 10)
26 #define GK20A_PMU_VA_SIZE (512 * 1024 * 1024)
27 #define GK20A_PMU_INST_SIZE (4 * 1024)
28 #define GK20A_PMU_UCODE_SIZE_MAX (256 * 1024)
29 #define GK20A_PMU_SEQ_BUF_SIZE 4096
/*
 * Mask with bits 1..i set and bit 0 always clear: selects the valid ZBC
 * table entries, skipping index 0 (presumably the reserved/default entry —
 * TODO confirm against the ZBC table layout).
 *
 * Use an unsigned all-ones pattern for the shift: the original ~(0) is a
 * signed -1, and left-shifting a negative value is undefined behavior in C
 * (C11 6.5.7).  Results are unchanged for all valid i (0..15).
 */
#define ZBC_MASK(i)			(~(~(0U) << ((i)+1)) & 0xfffe)
33 /* PMU Command/Message Interfaces for Adaptive Power */
34 /* Macro to get Histogram index */
35 #define PMU_AP_HISTOGRAM(idx) (idx)
36 #define PMU_AP_HISTOGRAM_CONT (4)
38 /* Total number of histogram bins */
39 #define PMU_AP_CFG_HISTOGRAM_BIN_N (16)
41 /* Mapping between Idle counters and histograms */
42 #define PMU_AP_IDLE_MASK_HIST_IDX_0 (2)
43 #define PMU_AP_IDLE_MASK_HIST_IDX_1 (3)
44 #define PMU_AP_IDLE_MASK_HIST_IDX_2 (5)
45 #define PMU_AP_IDLE_MASK_HIST_IDX_3 (6)
48 /* Mapping between AP_CTRLs and Histograms */
49 #define PMU_AP_HISTOGRAM_IDX_GRAPHICS (PMU_AP_HISTOGRAM(1))
51 /* Mapping between AP_CTRLs and Idle counters */
52 #define PMU_AP_IDLE_MASK_GRAPHICS (PMU_AP_IDLE_MASK_HIST_IDX_1)
54 #define APP_VERSION_1 17997577
55 #define APP_VERSION_0 16856675
58 enum pmu_perfmon_cmd_start_fields {
62 /* Adaptive Power Controls (AP_CTRL) */
64 PMU_AP_CTRL_ID_GRAPHICS = 0x0,
65 /* PMU_AP_CTRL_ID_MS ,*/
69 /* AP_CTRL Statistics */
70 struct pmu_ap_ctrl_stat {
72 * Represents whether AP is active or not
 * TODO: This is NvBool in RM; is that 1 byte or 4 bytes?
77 /* Idle filter represented by histogram bin index */
81 /* Total predicted power saving cycles. */
82 s32 power_saving_h_cycles;
84 /* Counts how many times AP gave us -ve power benefits. */
85 u32 bad_decision_count;
88 * Number of times ap structure needs to skip AP iterations
89 * KICK_CTRL from kernel updates this parameter.
92 u8 bin[PMU_AP_CFG_HISTOGRAM_BIN_N];
95 /* Parameters initialized by INITn APCTRL command */
96 struct pmu_ap_ctrl_init_params {
97 /* Minimum idle filter value in Us */
98 u32 min_idle_filter_us;
101 * Minimum Targeted Saving in Us. AP will update idle thresholds only
102 * if power saving achieved by updating idle thresholds is greater than
103 * Minimum targeted saving.
105 u32 min_target_saving_us;
107 /* Minimum targeted residency of power feature in Us */
108 u32 power_break_even_us;
111 * Maximum number of allowed power feature cycles per sample.
113 * We are allowing at max "pgPerSampleMax" cycles in one iteration of AP
114 * AKA pgPerSampleMax in original algorithm.
116 u32 cycles_per_sample_max;
119 /* AP Commands/Message structures */
122 * Structure for Generic AP Commands
124 struct pmu_ap_cmd_common {
130 * Structure for INIT AP command
132 struct pmu_ap_cmd_init {
136 u32 pg_sampling_period_us;
140 * Structure for Enable/Disable ApCtrl Commands
142 struct pmu_ap_cmd_enable_ctrl {
149 struct pmu_ap_cmd_disable_ctrl {
157 * Structure for INIT command
159 struct pmu_ap_cmd_init_ctrl {
163 struct pmu_ap_ctrl_init_params params;
166 struct pmu_ap_cmd_init_and_enable_ctrl {
170 struct pmu_ap_ctrl_init_params params;
174 * Structure for KICK_CTRL command
176 struct pmu_ap_cmd_kick_ctrl {
185 * Structure for PARAM command
187 struct pmu_ap_cmd_param {
196 * Defines for AP commands
199 PMU_AP_CMD_ID_INIT = 0x0 ,
200 PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL,
201 PMU_AP_CMD_ID_ENABLE_CTRL ,
202 PMU_AP_CMD_ID_DISABLE_CTRL ,
203 PMU_AP_CMD_ID_KICK_CTRL ,
211 struct pmu_ap_cmd_common cmn;
212 struct pmu_ap_cmd_init init;
213 struct pmu_ap_cmd_init_and_enable_ctrl init_and_enable_ctrl;
214 struct pmu_ap_cmd_enable_ctrl enable_ctrl;
215 struct pmu_ap_cmd_disable_ctrl disable_ctrl;
216 struct pmu_ap_cmd_kick_ctrl kick_ctrl;
220 * Structure for generic AP Message
222 struct pmu_ap_msg_common {
228 * Structure for INIT_ACK Message
230 struct pmu_ap_msg_init_ack {
234 u32 stats_dmem_offset;
238 * Defines for AP messages
241 PMU_AP_MSG_ID_INIT_ACK = 0x0,
249 struct pmu_ap_msg_common cmn;
250 struct pmu_ap_msg_init_ack init_ack;
253 /* Default Sampling Period of AELPG */
254 #define APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US (1000000)
256 /* Default values of APCTRL parameters */
257 #define APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US (100)
258 #define APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US (10000)
259 #define APCTRL_POWER_BREAKEVEN_DEFAULT_US (2000)
260 #define APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT (200)
263 * Disable reason for Adaptive Power Controller
266 APCTRL_DISABLE_REASON_RM_UNLOAD,
267 APCTRL_DISABLE_REASON_RMCTRL,
271 * Adaptive Power Controller
274 u32 stats_dmem_offset;
275 u32 disable_reason_mask;
276 struct pmu_ap_ctrl_stat stat_cache;
281 * Adaptive Power structure
283 * ap structure provides generic infrastructure to make any power feature
288 struct ap_ctrl ap_ctrl[PMU_AP_CTRL_ID_MAX];
293 GK20A_PMU_DMAIDX_UCODE = 0,
294 GK20A_PMU_DMAIDX_VIRT = 1,
295 GK20A_PMU_DMAIDX_PHYS_VID = 2,
296 GK20A_PMU_DMAIDX_PHYS_SYS_COH = 3,
297 GK20A_PMU_DMAIDX_PHYS_SYS_NCOH = 4,
298 GK20A_PMU_DMAIDX_RSVD = 5,
299 GK20A_PMU_DMAIDX_PELPG = 6,
300 GK20A_PMU_DMAIDX_END = 7
321 /* Make sure size of this structure is a multiple of 4 bytes */
322 struct pmu_cmdline_args_v0 {
323 u32 cpu_freq_hz; /* Frequency of the clock driving PMU */
324 u32 falc_trace_size; /* falctrace buffer size (bytes) */
325 u32 falc_trace_dma_base; /* 256-byte block address */
326 u32 falc_trace_dma_idx; /* dmaIdx for DMA operations */
327 struct pmu_mem_v0 gc6_ctx; /* dmem offset of gc6 context */
330 struct pmu_cmdline_args_v1 {
331 u32 cpu_freq_hz; /* Frequency of the clock driving PMU */
332 u32 falc_trace_size; /* falctrace buffer size (bytes) */
333 u32 falc_trace_dma_base; /* 256-byte block address */
334 u32 falc_trace_dma_idx; /* dmaIdx for DMA operations */
336 struct pmu_mem_v1 gc6_ctx; /* dmem offset of gc6 context */
339 #define GK20A_PMU_DMEM_BLKSIZE2 8
341 #define GK20A_PMU_UCODE_NB_MAX_OVERLAY 32
342 #define GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH 64
344 struct pmu_ucode_desc {
349 char date[GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH];
350 u32 bootloader_start_offset;
352 u32 bootloader_imem_offset;
353 u32 bootloader_entry_point;
354 u32 app_start_offset;
359 u32 app_resident_code_offset; /* Offset from appStartOffset */
360 u32 app_resident_code_size; /* Exact size of the resident code ( potentially contains CRC inside at the end ) */
361 u32 app_resident_data_offset; /* Offset from appStartOffset */
	u32 app_resident_data_size;  /* Exact size of the resident data ( potentially contains CRC inside at the end ) */
364 struct {u32 start; u32 size;} load_ovl[GK20A_PMU_UCODE_NB_MAX_OVERLAY];
368 #define PMU_UNIT_REWIND (0x00)
369 #define PMU_UNIT_I2C (0x01)
370 #define PMU_UNIT_SEQ (0x02)
371 #define PMU_UNIT_PG (0x03)
372 #define PMU_UNIT_AVAILABLE1 (0x04)
373 #define PMU_UNIT_AVAILABLE2 (0x05)
374 #define PMU_UNIT_MEM (0x06)
375 #define PMU_UNIT_INIT (0x07)
376 #define PMU_UNIT_FBBA (0x08)
377 #define PMU_UNIT_DIDLE (0x09)
378 #define PMU_UNIT_AVAILABLE3 (0x0A)
379 #define PMU_UNIT_AVAILABLE4 (0x0B)
380 #define PMU_UNIT_HDCP_MAIN (0x0C)
381 #define PMU_UNIT_HDCP_V (0x0D)
382 #define PMU_UNIT_HDCP_SRM (0x0E)
383 #define PMU_UNIT_NVDPS (0x0F)
384 #define PMU_UNIT_DEINIT (0x10)
385 #define PMU_UNIT_AVAILABLE5 (0x11)
386 #define PMU_UNIT_PERFMON (0x12)
387 #define PMU_UNIT_FAN (0x13)
388 #define PMU_UNIT_PBI (0x14)
389 #define PMU_UNIT_ISOBLIT (0x15)
390 #define PMU_UNIT_DETACH (0x16)
391 #define PMU_UNIT_DISP (0x17)
392 #define PMU_UNIT_HDCP (0x18)
393 #define PMU_UNIT_REGCACHE (0x19)
394 #define PMU_UNIT_SYSMON (0x1A)
395 #define PMU_UNIT_THERM (0x1B)
396 #define PMU_UNIT_PMGR (0x1C)
397 #define PMU_UNIT_PERF (0x1D)
398 #define PMU_UNIT_PCM (0x1E)
399 #define PMU_UNIT_RC (0x1F)
400 #define PMU_UNIT_NULL (0x20)
401 #define PMU_UNIT_LOGGER (0x21)
402 #define PMU_UNIT_SMBPBI (0x22)
403 #define PMU_UNIT_END (0x23)
405 #define PMU_UNIT_TEST_START (0xFE)
406 #define PMU_UNIT_END_SIM (0xFF)
407 #define PMU_UNIT_TEST_END (0xFF)
/*
 * A unit ID is valid if it falls in the production range [0, PMU_UNIT_END)
 * or in the test range [PMU_UNIT_TEST_START, 0xFF].
 */
#define PMU_UNIT_ID_IS_VALID(id)	\
		(((id) < PMU_UNIT_END) || ((id) >= PMU_UNIT_TEST_START))
412 #define PMU_DMEM_ALLOC_ALIGNMENT (32)
413 #define PMU_DMEM_ALIGNMENT (4)
415 #define PMU_CMD_FLAGS_PMU_MASK (0xF0)
417 #define PMU_CMD_FLAGS_STATUS BIT(0)
418 #define PMU_CMD_FLAGS_INTR BIT(1)
419 #define PMU_CMD_FLAGS_EVENT BIT(2)
420 #define PMU_CMD_FLAGS_WATERMARK BIT(3)
428 #define PMU_MSG_HDR_SIZE sizeof(struct pmu_hdr)
429 #define PMU_CMD_HDR_SIZE sizeof(struct pmu_hdr)
431 #define PMU_QUEUE_COUNT 5
433 struct pmu_allocation_v0 {
437 struct pmu_dmem dmem;
438 struct pmu_mem_v0 fb;
442 struct pmu_allocation_v1 {
444 struct pmu_dmem dmem;
445 struct pmu_mem_v1 fb;
450 PMU_INIT_MSG_TYPE_PMU_INIT = 0,
453 struct pmu_init_msg_pmu_v0 {
462 } queue_info[PMU_QUEUE_COUNT];
464 u16 sw_managed_area_offset;
465 u16 sw_managed_area_size;
468 struct pmu_init_msg_pmu_v1 {
471 u16 os_debug_entry_point;
478 } queue_info[PMU_QUEUE_COUNT];
480 u16 sw_managed_area_offset;
481 u16 sw_managed_area_size;
484 union pmu_init_msg_pmu {
485 struct pmu_init_msg_pmu_v0 v0;
486 struct pmu_init_msg_pmu_v1 v1;
489 struct pmu_init_msg {
492 struct pmu_init_msg_pmu_v1 pmu_init_v1;
493 struct pmu_init_msg_pmu_v0 pmu_init_v0;
498 PMU_PG_ELPG_MSG_INIT_ACK,
499 PMU_PG_ELPG_MSG_DISALLOW_ACK,
500 PMU_PG_ELPG_MSG_ALLOW_ACK,
501 PMU_PG_ELPG_MSG_FREEZE_ACK,
502 PMU_PG_ELPG_MSG_FREEZE_ABORT,
503 PMU_PG_ELPG_MSG_UNFREEZE_ACK,
506 struct pmu_pg_msg_elpg_msg {
513 PMU_PG_STAT_MSG_RESP_DMEM_OFFSET = 0,
516 struct pmu_pg_msg_stat {
524 PMU_PG_MSG_ENG_BUF_LOADED,
525 PMU_PG_MSG_ENG_BUF_UNLOADED,
526 PMU_PG_MSG_ENG_BUF_FAILED,
529 struct pmu_pg_msg_eng_buf_stat {
539 struct pmu_pg_msg_elpg_msg elpg_msg;
540 struct pmu_pg_msg_stat stat;
541 struct pmu_pg_msg_eng_buf_stat eng_buf_stat;
542 /* TBD: other pg messages */
543 union pmu_ap_msg ap_msg;
548 PMU_RC_MSG_TYPE_UNHANDLED_CMD = 0,
551 struct pmu_rc_msg_unhandled_cmd {
558 struct pmu_rc_msg_unhandled_cmd unhandled_cmd;
562 PMU_PG_CMD_ID_ELPG_CMD = 0,
563 PMU_PG_CMD_ID_ENG_BUF_LOAD,
564 PMU_PG_CMD_ID_ENG_BUF_UNLOAD,
565 PMU_PG_CMD_ID_PG_STAT,
566 PMU_PG_CMD_ID_PG_LOG_INIT,
567 PMU_PG_CMD_ID_PG_LOG_FLUSH,
568 PMU_PG_CMD_ID_PG_PARAM,
569 PMU_PG_CMD_ID_ELPG_INIT,
570 PMU_PG_CMD_ID_ELPG_POLL_CTXSAVE,
571 PMU_PG_CMD_ID_ELPG_ABORT_POLL,
572 PMU_PG_CMD_ID_ELPG_PWR_UP,
573 PMU_PG_CMD_ID_ELPG_DISALLOW,
574 PMU_PG_CMD_ID_ELPG_ALLOW,
576 RM_PMU_PG_CMD_ID_PSI,
578 PMU_PG_CMD_ID_ZBC_TABLE_UPDATE,
579 PMU_PG_CMD_ID_PWR_RAIL_GATE_DISABLE = 0x20,
580 PMU_PG_CMD_ID_PWR_RAIL_GATE_ENABLE,
581 PMU_PG_CMD_ID_PWR_RAIL_SMU_MSG_DISABLE
585 PMU_PG_ELPG_CMD_INIT,
586 PMU_PG_ELPG_CMD_DISALLOW,
587 PMU_PG_ELPG_CMD_ALLOW,
588 PMU_PG_ELPG_CMD_FREEZE,
589 PMU_PG_ELPG_CMD_UNFREEZE,
592 struct pmu_pg_cmd_elpg_cmd {
598 struct pmu_pg_cmd_eng_buf_load {
610 PMU_PG_STAT_CMD_ALLOC_DMEM = 0,
613 struct pmu_pg_cmd_stat {
623 struct pmu_pg_cmd_elpg_cmd elpg_cmd;
624 struct pmu_pg_cmd_eng_buf_load eng_buf_load;
625 struct pmu_pg_cmd_stat stat;
626 /* TBD: other pg commands */
627 union pmu_ap_cmd ap_cmd;
632 #define PMU_DOMAIN_GROUP_PSTATE 0
633 #define PMU_DOMAIN_GROUP_GPC2CLK 1
634 #define PMU_DOMAIN_GROUP_NUM 2
636 /* TBD: smart strategy */
637 #define PMU_PERFMON_PCT_TO_INC 58
638 #define PMU_PERFMON_PCT_TO_DEC 23
640 struct pmu_perfmon_counter {
645 u16 upper_threshold; /* units of 0.01% */
646 u16 lower_threshold; /* units of 0.01% */
649 #define PMU_PERFMON_FLAG_ENABLE_INCREASE (0x00000001)
650 #define PMU_PERFMON_FLAG_ENABLE_DECREASE (0x00000002)
651 #define PMU_PERFMON_FLAG_CLEAR_PREV (0x00000004)
655 PMU_PERFMON_CMD_ID_START = 0,
656 PMU_PERFMON_CMD_ID_STOP = 1,
657 PMU_PERFMON_CMD_ID_INIT = 2
660 struct pmu_perfmon_cmd_start_v1 {
665 struct pmu_allocation_v1 counter_alloc;
668 struct pmu_perfmon_cmd_start_v0 {
673 struct pmu_allocation_v0 counter_alloc;
676 struct pmu_perfmon_cmd_stop {
680 struct pmu_perfmon_cmd_init_v1 {
682 u8 to_decrease_count;
684 u32 sample_period_us;
685 struct pmu_allocation_v1 counter_alloc;
687 u8 samples_in_moving_avg;
691 struct pmu_perfmon_cmd_init_v0 {
693 u8 to_decrease_count;
695 u32 sample_period_us;
696 struct pmu_allocation_v0 counter_alloc;
698 u8 samples_in_moving_avg;
702 struct pmu_perfmon_cmd {
705 struct pmu_perfmon_cmd_start_v0 start_v0;
706 struct pmu_perfmon_cmd_start_v1 start_v1;
707 struct pmu_perfmon_cmd_stop stop;
708 struct pmu_perfmon_cmd_init_v0 init_v0;
709 struct pmu_perfmon_cmd_init_v1 init_v1;
721 PMU_PERFMON_MSG_ID_INCREASE_EVENT = 0,
722 PMU_PERFMON_MSG_ID_DECREASE_EVENT = 1,
723 PMU_PERFMON_MSG_ID_INIT_EVENT = 2,
724 PMU_PERFMON_MSG_ID_ACK = 3
727 struct pmu_perfmon_msg_generic {
734 struct pmu_perfmon_msg {
737 struct pmu_perfmon_msg_generic gen;
745 struct pmu_perfmon_cmd perfmon;
746 struct pmu_pg_cmd pg;
747 struct pmu_zbc_cmd zbc;
754 struct pmu_init_msg init;
755 struct pmu_perfmon_msg perfmon;
756 struct pmu_pg_msg pg;
757 struct pmu_rc_msg rc;
761 #define PMU_SHA1_GID_SIGNATURE 0xA7C66AD2
762 #define PMU_SHA1_GID_SIGNATURE_SIZE 4
764 #define PMU_SHA1_GID_SIZE 16
766 struct pmu_sha1_gid {
768 u8 gid[PMU_SHA1_GID_SIZE];
771 struct pmu_sha1_gid_data {
772 u8 signature[PMU_SHA1_GID_SIGNATURE_SIZE];
773 u8 gid[PMU_SHA1_GID_SIZE];
/*
 * PMU queue identifiers: four command queues (host -> PMU) and one message
 * queue (PMU -> host).
 */
#define PMU_COMMAND_QUEUE_HPQ		0	/* write by sw, read by pmu, protected by sw mutex lock */
#define PMU_COMMAND_QUEUE_LPQ		1	/* write by sw, read by pmu, protected by sw mutex lock */
#define PMU_COMMAND_QUEUE_BIOS		2	/* read/write by sw/hw, protected by hw pmu mutex, id = 2 */
#define PMU_COMMAND_QUEUE_SMI		3	/* read/write by sw/hw, protected by hw pmu mutex, id = 3 */
#define PMU_MESSAGE_QUEUE		4	/* write by pmu, read by sw, accessed by interrupt handler, no lock */
/* NOTE(review): redefinition of the earlier PMU_QUEUE_COUNT; same value (5), so benign */
#define PMU_QUEUE_COUNT			5
784 PMU_MUTEX_ID_RSVD1 = 0 ,
785 PMU_MUTEX_ID_GPUSER ,
786 PMU_MUTEX_ID_QUEUE_BIOS ,
787 PMU_MUTEX_ID_QUEUE_SMI ,
788 PMU_MUTEX_ID_GPMUTEX ,
790 PMU_MUTEX_ID_RMLOCK ,
791 PMU_MUTEX_ID_MSGBOX ,
/* A command queue is any queue ID below the single message queue ID. */
#define PMU_IS_COMMAND_QUEUE(id)	\
		((id) < PMU_MESSAGE_QUEUE)

/* Only HPQ/LPQ are managed purely by software (sw mutex, no hw PMU mutex). */
#define PMU_IS_SW_COMMAND_QUEUE(id)	\
		(((id) == PMU_COMMAND_QUEUE_HPQ) || \
		 ((id) == PMU_COMMAND_QUEUE_LPQ))

/* The single PMU -> host message queue. */
#define PMU_IS_MESSAGE_QUEUE(id)	\
		((id) == PMU_MESSAGE_QUEUE)
819 #define QUEUE_SET (true)
820 #define QUEUE_GET (false)
822 #define QUEUE_ALIGNMENT (4)
824 #define PMU_PGENG_GR_BUFFER_IDX_INIT (0)
825 #define PMU_PGENG_GR_BUFFER_IDX_ZBC (1)
826 #define PMU_PGENG_GR_BUFFER_IDX_FECS (2)
830 PMU_DMAIDX_UCODE = 0,
832 PMU_DMAIDX_PHYS_VID = 2,
833 PMU_DMAIDX_PHYS_SYS_COH = 3,
834 PMU_DMAIDX_PHYS_SYS_NCOH = 4,
836 PMU_DMAIDX_PELPG = 6,
845 /* used by hw, for BIOS/SMI queue */
848 /* used by sw, for LPQ/HPQ queue */
851 /* current write position */
853 /* physical dmem offset where this queue begins */
855 /* logical queue identifier */
857 /* physical queue index */
864 bool opened; /* opened implies locked */
/* A HW mutex ID is valid if it is below the invalid-ID sentinel. */
#define PMU_MUTEX_ID_IS_VALID(id)	\
		((id) < PMU_MUTEX_ID_INVALID)

/* HW mutex owner token 0 means "unowned". */
#define PMU_INVALID_MUTEX_OWNER_ID	(0)
/* Maximum number of in-flight command/response sequences tracked at once. */
#define PMU_MAX_NUM_SEQUENCES		(256)
/* 1 << 5 = 32 sequence bits per bitmap word (assumes 32-bit words — TODO
 * confirm: pmu_seq_tbl[] is declared as unsigned long, which is 64-bit on
 * LP64; harmless there, the table is merely oversized). */
#define PMU_SEQ_BIT_SHIFT		(5)
#define PMU_SEQ_TBL_SIZE	\
		(PMU_MAX_NUM_SEQUENCES >> PMU_SEQ_BIT_SHIFT)

/* Sentinel sequence descriptor meaning "no sequence allocated". */
#define PMU_INVALID_SEQ_DESC		(~0)
888 PMU_SEQ_STATE_FREE = 0,
889 PMU_SEQ_STATE_PENDING,
891 PMU_SEQ_STATE_CANCELLED
902 typedef void (*pmu_callback)(struct gk20a *, struct pmu_msg *, void *, u32,
905 struct pmu_sequence {
911 struct pmu_allocation_v0 in_v0;
912 struct pmu_allocation_v1 in_v1;
915 struct pmu_allocation_v0 out_v0;
916 struct pmu_allocation_v1 out_v1;
919 pmu_callback callback;
923 struct pmu_pg_stats {
924 u64 pg_entry_start_timestamp;
925 u64 pg_ingating_start_timestamp;
926 u64 pg_exit_start_timestamp;
927 u64 pg_ungating_start_timestamp;
928 u32 pg_avg_entry_time_us;
930 u32 pg_ingating_time_us;
931 u32 pg_avg_exit_time_us;
932 u32 pg_ungating_count;
933 u32 pg_ungating_time_us;
935 u32 pg_gating_deny_cnt;
938 #define PMU_PG_IDLE_THRESHOLD_SIM 1000
939 #define PMU_PG_POST_POWERUP_IDLE_THRESHOLD_SIM 4000000
940 /* TBD: QT or else ? */
941 #define PMU_PG_IDLE_THRESHOLD 15000
942 #define PMU_PG_POST_POWERUP_IDLE_THRESHOLD 1000000
944 /* state transition :
945 OFF => [OFF_ON_PENDING optional] => ON_PENDING => ON => OFF
946 ON => OFF is always synchronized */
947 #define PMU_ELPG_STAT_OFF 0 /* elpg is off */
948 #define PMU_ELPG_STAT_ON 1 /* elpg is on */
949 #define PMU_ELPG_STAT_ON_PENDING 2 /* elpg is off, ALLOW cmd has been sent, wait for ack */
950 #define PMU_ELPG_STAT_OFF_PENDING 3 /* elpg is on, DISALLOW cmd has been sent, wait for ack */
951 #define PMU_ELPG_STAT_OFF_ON_PENDING 4 /* elpg is off, caller has requested on, but ALLOW
952 cmd hasn't been sent due to ENABLE_ALLOW delay */
954 /* Falcon Register index */
955 #define PMU_FALCON_REG_R0 (0)
956 #define PMU_FALCON_REG_R1 (1)
957 #define PMU_FALCON_REG_R2 (2)
958 #define PMU_FALCON_REG_R3 (3)
959 #define PMU_FALCON_REG_R4 (4)
960 #define PMU_FALCON_REG_R5 (5)
961 #define PMU_FALCON_REG_R6 (6)
962 #define PMU_FALCON_REG_R7 (7)
963 #define PMU_FALCON_REG_R8 (8)
964 #define PMU_FALCON_REG_R9 (9)
965 #define PMU_FALCON_REG_R10 (10)
966 #define PMU_FALCON_REG_R11 (11)
967 #define PMU_FALCON_REG_R12 (12)
968 #define PMU_FALCON_REG_R13 (13)
969 #define PMU_FALCON_REG_R14 (14)
970 #define PMU_FALCON_REG_R15 (15)
971 #define PMU_FALCON_REG_IV0 (16)
972 #define PMU_FALCON_REG_IV1 (17)
973 #define PMU_FALCON_REG_UNDEFINED (18)
974 #define PMU_FALCON_REG_EV (19)
975 #define PMU_FALCON_REG_SP (20)
976 #define PMU_FALCON_REG_PC (21)
977 #define PMU_FALCON_REG_IMB (22)
978 #define PMU_FALCON_REG_DMB (23)
979 #define PMU_FALCON_REG_CSW (24)
980 #define PMU_FALCON_REG_CCR (25)
981 #define PMU_FALCON_REG_SEC (26)
982 #define PMU_FALCON_REG_CTX (27)
983 #define PMU_FALCON_REG_EXCI (28)
984 #define PMU_FALCON_REG_RSVD0 (29)
985 #define PMU_FALCON_REG_RSVD1 (30)
986 #define PMU_FALCON_REG_RSVD2 (31)
987 #define PMU_FALCON_REG_SIZE (32)
/* Choices for pmu_state (boot/init state machine, in progression order) */
#define PMU_STATE_OFF			0 /* PMU is off */
#define PMU_STATE_STARTING		1 /* PMU is booting */
#define PMU_STATE_ELPG_BOOTED		2 /* ELPG is initialized */
#define PMU_STATE_LOADING_PG_BUF	3 /* Loading PG buf */
#define PMU_STATE_LOADING_ZBC		4 /* Loading ZBC buf */
#define PMU_STATE_STARTED		5 /* Fully initialized */
1001 struct pmu_ucode_desc *desc;
1002 struct pmu_mem_desc ucode;
1004 struct pmu_mem_desc pg_buf;
1005 /* TBD: remove this if ZBC seq is fixed */
1006 struct pmu_mem_desc seq_buf;
1009 struct pmu_sha1_gid gid_info;
1011 struct pmu_queue queue[PMU_QUEUE_COUNT];
1013 struct pmu_sequence *seq;
1014 unsigned long pmu_seq_tbl[PMU_SEQ_TBL_SIZE];
1017 struct pmu_mutex *mutex;
1020 struct mutex pmu_copy_lock;
1021 struct mutex pmu_seq_lock;
1023 struct gk20a_allocator dmem;
1030 u32 stat_dmem_offset;
1035 wait_queue_head_t boot_wq;
1037 #define PMU_ELPG_ENABLE_ALLOW_DELAY_MSEC 1 /* msec */
1038 struct work_struct pg_init;
1039 struct mutex elpg_mutex; /* protect elpg enable/disable */
1040 int elpg_refcnt; /* disable -1, enable +1, <=0 elpg disabled, > 0 elpg enabled */
1042 struct pmu_perfmon_counter perfmon_counter;
1043 u32 perfmon_state_id[PMU_DOMAIN_GROUP_NUM];
1047 void (*remove_support)(struct pmu_gk20a *pmu);
1053 struct mutex isr_mutex;
1054 struct mutex isr_enable_lock;
1059 struct pmu_cmdline_args_v0 args_v0;
1060 struct pmu_cmdline_args_v1 args_v1;
1064 int gk20a_init_pmu_support(struct gk20a *g);
1065 int gk20a_init_pmu_bind_fecs(struct gk20a *g);
1067 void gk20a_pmu_isr(struct gk20a *g);
1069 /* send a cmd to pmu */
1070 int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd, struct pmu_msg *msg,
1071 struct pmu_payload *payload, u32 queue_id,
1072 pmu_callback callback, void* cb_param,
1073 u32 *seq_desc, unsigned long timeout);
1075 int gk20a_pmu_enable_elpg(struct gk20a *g);
1076 int gk20a_pmu_disable_elpg(struct gk20a *g);
1078 void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries);
1080 int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable);
1082 int pmu_mutex_acquire(struct pmu_gk20a *pmu, u32 id, u32 *token);
1083 int pmu_mutex_release(struct pmu_gk20a *pmu, u32 id, u32 *token);
1084 int gk20a_pmu_destroy(struct gk20a *g);
1085 int gk20a_pmu_load_norm(struct gk20a *g, u32 *load);
1086 int gk20a_pmu_debugfs_init(struct platform_device *dev);
1087 void gk20a_pmu_reset_load_counters(struct gk20a *g);
1088 void gk20a_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles,
1091 #endif /*__PMU_GK20A_H__*/