/* Copyright (c) 2014-2016, NVIDIA CORPORATION.  All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/* NVS = NVidia Sensor framework */
/* See nvs_iio.c and nvs.h for documentation */
17 #include <linux/i2c.h>
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/kernel.h>
21 #include <linux/err.h>
22 #include <linux/delay.h>
23 #include <linux/interrupt.h>
24 #include <linux/regulator/consumer.h>
26 #include <linux/nvs.h>
27 #include <linux/crc32.h>
28 #include <linux/mpu_iio.h>
32 #define NVI_DRIVER_VERSION (333)
33 #define NVI_VENDOR "Invensense"
34 #define NVI_NAME "mpu6xxx"
35 #define NVI_NAME_MPU6050 "mpu6050"
36 #define NVI_NAME_MPU6500 "mpu6500"
37 #define NVI_NAME_MPU6515 "mpu6515"
38 #define NVI_NAME_MPU9150 "mpu9150"
39 #define NVI_NAME_MPU9250 "mpu9250"
40 #define NVI_NAME_MPU9350 "mpu9350"
41 #define NVI_NAME_ICM20628 "icm20628"
42 #define NVI_NAME_ICM20630 "icm20630"
43 #define NVI_NAME_ICM20632 "icm20632"
44 #define NVI_HW_ID_AUTO (0xFF)
45 #define NVI_HW_ID_MPU6050 (0x68)
46 #define NVI_HW_ID_MPU6500 (0x70)
47 #define NVI_HW_ID_MPU6515 (0x74)
48 #define NVI_HW_ID_MPU9150 (0x68)
49 #define NVI_HW_ID_MPU9250 (0x71)
50 #define NVI_HW_ID_MPU9350 (0x72)
51 #define NVI_HW_ID_ICM20628 (0xA2)
52 #define NVI_HW_ID_ICM20630 (0xAB)
53 #define NVI_HW_ID_ICM20632 (0xAD)
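/* The NVI_HW_ID_* values are the WHO_AM_I register contents expected from
 * each part.  When the generic NVI_NAME ("mpu6xxx") / NVI_HW_ID_AUTO entry
 * is used, the driver reads the hardware ID back and matches it against
 * this list to pick the part-specific HAL.
 */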
54 /* NVI_FW_CRC_CHECK used only during development to confirm valid FW */
55 #define NVI_FW_CRC_CHECK (0)
59 struct work_struct fw_load_work;
60 const struct i2c_device_id *i2c_dev_id;
66 const struct nvi_hal *hal;
68 /* ARRAY_SIZE(nvi_id_hals) must match ARRAY_SIZE(nvi_i2c_device_id) - 1 */
82 /* enum NVI_NDX_N must match ARRAY_SIZE(nvi_i2c_device_id) - 1 */
83 static struct i2c_device_id nvi_i2c_device_id[] = {
84 { NVI_NAME, NVI_NDX_AUTO },
85 { NVI_NAME_MPU6050, NVI_NDX_MPU6050 },
86 { NVI_NAME_MPU6500, NVI_NDX_MPU6500 },
87 { NVI_NAME_MPU6515, NVI_NDX_MPU6515 },
88 { NVI_NAME_MPU9150, NVI_NDX_MPU9150 },
89 { NVI_NAME_MPU9250, NVI_NDX_MPU9250 },
90 { NVI_NAME_MPU9350, NVI_NDX_MPU9350 },
91 { NVI_NAME_ICM20628, NVI_NDX_ICM20628 },
92 { NVI_NAME_ICM20630, NVI_NDX_ICM20630 },
93 { NVI_NAME_ICM20632, NVI_NDX_ICM20632 },
105 NVI_INFO_REG_WR = 0xC6, /* use 0xD0 on cmd line */
/* regulator names in order of powering on */
static char *nvi_vregs[] = {
	"vdd",
	"vlogic",
};

static struct nvi_state *nvi_state_local;
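/* Only a single device instance is supported: nvi_state_local is set at
 * probe time and is the state the exported nvi_mpu_* functions below use
 * to reach this driver from external (auxiliary slave sensor) drivers.
 */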
122 static int nvi_dmp_fw(struct nvi_state *st);
123 static int nvi_aux_bypass_enable(struct nvi_state *st, bool enable);
124 static int nvi_read(struct nvi_state *st, bool flush);
126 static int nvi_nb_vreg(struct nvi_state *st,
127 unsigned long event, unsigned int i)
129 if (event & REGULATOR_EVENT_POST_ENABLE)
130 st->ts_vreg_en[i] = nvs_timestamp();
131 else if (event & (REGULATOR_EVENT_DISABLE |
132 REGULATOR_EVENT_FORCE_DISABLE))
133 st->ts_vreg_en[i] = 0;
134 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
135 dev_info(&st->i2c->dev, "%s %s event=0x%x ts=%lld\n",
136 __func__, st->vreg[i].supply, (unsigned int)event,
141 static int nvi_nb_vreg_vdd(struct notifier_block *nb,
142 unsigned long event, void *ignored)
144 struct nvi_state *st = container_of(nb, struct nvi_state, nb_vreg[0]);
146 return nvi_nb_vreg(st, event, 0);
149 static int nvi_nb_vreg_vlogic(struct notifier_block *nb,
150 unsigned long event, void *ignored)
152 struct nvi_state *st = container_of(nb, struct nvi_state, nb_vreg[1]);
154 return nvi_nb_vreg(st, event, 1);
157 static int (* const nvi_nb_vreg_pf[])(struct notifier_block *nb,
158 unsigned long event, void *ignored) = {
163 void nvi_err(struct nvi_state *st)
170 static void nvi_mutex_lock(struct nvi_state *st)
175 for (i = 0; i < DEV_N; i++)
176 st->nvs->nvs_mutex_lock(st->snsr[i].nvs_st);
180 static void nvi_mutex_unlock(struct nvi_state *st)
185 for (i = 0; i < DEV_N; i++)
186 st->nvs->nvs_mutex_unlock(st->snsr[i].nvs_st);
190 static void nvi_disable_irq(struct nvi_state *st)
192 if (st->i2c->irq && !st->irq_dis) {
193 disable_irq_nosync(st->i2c->irq);
195 if (st->sts & NVS_STS_SPEW_MSG)
196 dev_info(&st->i2c->dev, "%s IRQ disabled\n", __func__);
200 static void nvi_enable_irq(struct nvi_state *st)
202 if (st->i2c->irq && st->irq_dis) {
203 enable_irq(st->i2c->irq);
205 if (st->sts & NVS_STS_SPEW_MSG)
206 dev_info(&st->i2c->dev, "%s IRQ enabled\n", __func__);
210 static void nvi_rc_clr(struct nvi_state *st, const char *fn)
214 for (i = 0; i < ARRAY_SIZE(st->rc_msk); i++)
216 if (st->sts & NVI_DBG_SPEW_MSG)
217 dev_info(&st->i2c->dev, "%s-%s\n", __func__, fn);
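/* Register cache (rc) scheme: st->rc holds the last value written to each
 * register and st->rc_msk holds a valid bit per cached byte.  The *_rc
 * write helpers below skip the I2C transaction when the cache is valid and
 * the value is unchanged, update the cache and set the valid bits on a
 * successful write, and clear the valid bits on an I2C error so the next
 * write is forced out to the hardware.  nvi_rc_clr() above invalidates the
 * whole cache, e.g. after a device reset.  Example (taken from nvi_wr_pm1):
 *	ret = nvi_i2c_wr_rc(st, &st->hal->reg->pm1, pm1,
 *			    __func__, &st->rc.pm1);
 */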
220 static int nvi_i2c_w(struct nvi_state *st, u16 len, u8 *buf)
224 msg.addr = st->i2c->addr;
228 if (i2c_transfer(st->i2c->adapter, &msg, 1) != 1) {
236 static int nvi_wr_reg_bank_sel(struct nvi_state *st, u8 reg_bank)
242 if (!st->hal->reg->reg_bank.reg)
246 if (st->rc_msk[NVI_RC_BANK_REG_BANK] & NVI_RC_MSK_REG_BANK) {
247 if (reg_bank == st->rc.reg_bank)
251 buf[0] = st->hal->reg->reg_bank.reg;
253 ret = nvi_i2c_w(st, sizeof(buf), buf);
255 dev_err(&st->i2c->dev, "%s 0x%x!->0x%x ERR=%d\n",
256 __func__, st->rc.reg_bank, reg_bank, ret);
257 st->rc_msk[NVI_RC_BANK_REG_BANK] &=
258 ~NVI_RC_MSK_REG_BANK;
260 if (st->sts & NVI_DBG_SPEW_MSG)
261 dev_info(&st->i2c->dev, "%s 0x%x->0x%x\n",
262 __func__, st->rc.reg_bank, reg_bank);
263 st->rc.reg_bank = reg_bank;
264 st->rc_msk[NVI_RC_BANK_REG_BANK] |=
271 static int nvi_i2c_write(struct nvi_state *st, u8 bank, u16 len, u8 *buf)
275 ret = nvi_wr_reg_bank_sel(st, bank);
277 ret = nvi_i2c_w(st, len, buf);
281 static int nvi_i2c_write_be(struct nvi_state *st, const struct nvi_br *br,
288 for (i = len; i > 0; i--)
289 buf[i] = (u8)(val >> (8 * (len - i)));
290 return nvi_i2c_write(st, br->bank, len + 1, buf);
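/* Example of the byte packing above: for a 2-byte register pair and
 * val = 0x1234 the loop builds buf[1] = 0x12 and buf[2] = 0x34, i.e. the
 * value in big-endian byte order, with buf[0] holding the register
 * address.  nvi_i2c_write_le() below does the same with the value in
 * little-endian byte order.
 */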
293 static int nvi_i2c_write_le(struct nvi_state *st, const struct nvi_br *br,
300 for (i = 0; i < len; i++)
301 buf[i + 1] = (u8)(val >> (8 * i));
302 return nvi_i2c_write(st, br->bank, len + 1, buf);
305 int nvi_i2c_write_rc(struct nvi_state *st, const struct nvi_br *br, u32 val,
306 const char *fn, u8 *rc, bool be)
311 unsigned int rc_bank;
319 rc_bank <<= 7; /* registers only go to 0x7F */
321 rc_msk = ((1 << len) - 1) << (rc_bank % 64);
327 if ((st->rc_msk[rc_bank] & rc_msk) == rc_msk) {
328 /* register is cached */
329 for (i = 0; i < len; i++) {
331 (u8)(val >> (8 * i))) {
332 /* register data changed */
338 /* register not cached */
347 ret = nvi_i2c_write_be(st, br, len, val);
349 ret = nvi_i2c_write_le(st, br, len, val);
353 dev_err(&st->i2c->dev,
354 "%s 0x%08x!=>0x%01x%02x ERR=%d\n",
355 fn, val, br->bank, br->reg, ret);
356 st->rc_msk[rc_bank] &= ~rc_msk;
358 if (st->sts & NVI_DBG_SPEW_MSG && fn)
359 dev_info(&st->i2c->dev,
360 "%s 0x%08x=>0x%01x%02x\n",
361 fn, val, br->bank, br->reg);
363 for (i = 0; i < len; i++)
364 *(rc + i) = (u8)(val >> (8 * i));
365 st->rc_msk[rc_bank] |= rc_msk;
367 /* register data not cached */
368 st->rc_msk[rc_bank] &= ~rc_msk;
375 int nvi_i2c_wr(struct nvi_state *st, const struct nvi_br *br,
376 u8 val, const char *fn)
382 buf[1] = val | br->dflt;
383 ret = nvi_wr_reg_bank_sel(st, br->bank);
385 ret = nvi_i2c_w(st, sizeof(buf), buf);
389 dev_err(&st->i2c->dev,
390 "%s 0x%02x!=>0x%01x%02x ERR=%d\n",
391 fn, val, br->bank, br->reg, ret);
393 if (st->sts & NVI_DBG_SPEW_MSG && fn)
394 dev_info(&st->i2c->dev,
395 "%s 0x%02x=>0x%01x%02x\n",
396 fn, val, br->bank, br->reg);
402 int nvi_i2c_wr_rc(struct nvi_state *st, const struct nvi_br *br,
403 u8 val, const char *fn, u8 *rc)
407 unsigned int rc_bank;
412 rc_bank <<= 7; /* registers only go to 0x7F */
414 rc_msk = 1 << (rc_bank % 64);
419 if (st->rc_msk[rc_bank] & rc_msk) {
420 /* register is cached */
422 /* register data changed */
425 /* register not cached */
433 ret = nvi_i2c_wr(st, br, val, fn);
435 st->rc_msk[rc_bank] &= ~rc_msk;
439 st->rc_msk[rc_bank] |= rc_msk;
441 st->rc_msk[rc_bank] &= ~rc_msk;
448 int nvi_i2c_r(struct nvi_state *st, u8 bank, u8 reg, u16 len, u8 *buf)
450 struct i2c_msg msg[2];
453 ret = nvi_wr_reg_bank_sel(st, bank);
459 msg[0].addr = st->i2c->addr;
463 msg[1].addr = st->i2c->addr;
464 msg[1].flags = I2C_M_RD;
467 if (i2c_transfer(st->i2c->adapter, msg, 2) != 2) {
475 int nvi_i2c_rd(struct nvi_state *st, const struct nvi_br *br, u8 *buf)
481 return nvi_i2c_r(st, br->bank, br->reg, len, buf);
484 int nvi_mem_wr(struct nvi_state *st, u16 addr, u16 len, u8 *data,
487 struct i2c_msg msg[6];
496 ret = nvi_wr_reg_bank_sel(st, st->hal->reg->mem_bank.bank);
500 buf_bank[0] = st->hal->reg->mem_bank.reg;
501 buf_bank[1] = addr >> 8;
502 buf_addr[0] = st->hal->reg->mem_addr.reg;
503 buf_addr[1] = addr & 0xFF;
504 buf_data[0] = st->hal->reg->mem_rw.reg;
505 msg[0].addr = st->i2c->addr;
507 msg[0].len = sizeof(buf_bank);
508 msg[0].buf = buf_bank;
509 msg[1].addr = st->i2c->addr;
511 msg[1].len = sizeof(buf_addr);
512 msg[1].buf = buf_addr;
513 msg[2].addr = st->i2c->addr;
515 msg[2].buf = buf_data;
516 msg[3].addr = st->i2c->addr;
518 msg[3].len = sizeof(buf_addr);
519 msg[3].buf = buf_addr;
520 msg[4].addr = st->i2c->addr;
523 msg[4].buf = buf_data;
524 msg[5].addr = st->i2c->addr;
525 msg[5].flags = I2C_M_RD;
526 msg[5].buf = &buf_data[1];
528 bank_len = (addr + len - 1) >> 8;
529 for (; buf_bank[1] <= bank_len; buf_bank[1]++) {
530 if (buf_bank[1] == bank_len)
531 data_len = len - data_i;
533 data_len = 0x0100 - buf_addr[1];
534 msg[2].len = data_len + 1;
535 memcpy(&buf_data[1], data + data_i, data_len);
536 if (i2c_transfer(st->i2c->adapter, msg, 3) != 3) {
542 msg[5].len = data_len;
543 if (i2c_transfer(st->i2c->adapter, &msg[3], 3) != 3) {
548 ret = memcmp(&buf_data[1], data + data_i, data_len);
560 int nvi_mem_wr_be(struct nvi_state *st, u16 addr, u16 len, u32 val)
566 for (i = 0; i < len; i++)
567 buf[i] = (u8)(val >> (8 * (len - (i + 1))));
568 ret = nvi_mem_wr(st, addr, len, buf, false);
569 if (st->sts & NVI_DBG_SPEW_MSG)
570 dev_info(&st->i2c->dev, "%s 0x%08x=>0x%04hx err=%d\n",
571 __func__, val, addr, ret);
575 int nvi_mem_wr_be_mc(struct nvi_state *st, u16 addr, u16 len, u32 val, u32 *mc)
579 if (val != *mc || st->mc_dis) {
580 ret = nvi_mem_wr_be(st, addr, len, val);
587 int nvi_mem_rd(struct nvi_state *st, u16 addr, u16 len, u8 *data)
589 struct i2c_msg msg[4];
597 ret = nvi_wr_reg_bank_sel(st, st->hal->reg->mem_bank.bank);
601 buf_bank[0] = st->hal->reg->mem_bank.reg;
602 buf_bank[1] = addr >> 8;
603 buf_addr[0] = st->hal->reg->mem_addr.reg;
604 buf_addr[1] = addr & 0xFF;
605 msg[0].addr = st->i2c->addr;
607 msg[0].len = sizeof(buf_bank);
608 msg[0].buf = buf_bank;
609 msg[1].addr = st->i2c->addr;
611 msg[1].len = sizeof(buf_addr);
612 msg[1].buf = buf_addr;
613 msg[2].addr = st->i2c->addr;
616 msg[2].buf = (u8 *)&st->hal->reg->mem_rw.reg;
617 msg[3].addr = st->i2c->addr;
618 msg[3].flags = I2C_M_RD;
620 bank_len = (addr + len - 1) >> 8;
621 for (; buf_bank[1] <= bank_len; buf_bank[1]++) {
622 if (buf_bank[1] == bank_len)
623 data_len = len - data_i;
625 data_len = 0x0100 - buf_addr[1];
626 msg[3].len = data_len;
627 msg[3].buf = data + data_i;
628 if (i2c_transfer(st->i2c->adapter, msg, 4) != 4) {
640 int nvi_mem_rd_le(struct nvi_state *st, u16 addr, u16 len, u32 *val)
647 ret = nvi_mem_rd(st, addr, len, buf_rd);
649 /* convert to little endian */
650 for (i = 0; i < len; i++) {
661 static int nvi_rd_accel_offset(struct nvi_state *st)
667 for (i = 0; i < AXIS_N; i++) {
668 ret = nvi_i2c_rd(st, &st->hal->reg->a_offset_h[i], buf);
670 st->rc.accel_offset[i] = be16_to_cpup((__be16 *)buf);
675 int nvi_wr_accel_offset(struct nvi_state *st, unsigned int axis, u16 offset)
677 return nvi_i2c_write_rc(st, &st->hal->reg->a_offset_h[axis], offset,
678 __func__, (u8 *)&st->rc.accel_offset[axis], true);
681 static int nvi_rd_gyro_offset(struct nvi_state *st)
687 for (i = 0; i < AXIS_N; i++) {
688 ret = nvi_i2c_rd(st, &st->hal->reg->g_offset_h[i], buf);
690 st->rc.gyro_offset[i] = be16_to_cpup((__be16 *)buf);
695 int nvi_wr_gyro_offset(struct nvi_state *st, unsigned int axis, u16 offset)
697 return nvi_i2c_write_rc(st, &st->hal->reg->g_offset_h[axis], offset,
698 __func__, (u8 *)&st->rc.gyro_offset[axis], true);
701 int nvi_wr_fifo_cfg(struct nvi_state *st, int fifo)
705 if (!st->hal->reg->fifo_cfg.reg)
709 fifo_cfg = (fifo << 2) | 0x01;
712 return nvi_i2c_wr_rc(st, &st->hal->reg->fifo_cfg, fifo_cfg,
713 NULL, &st->rc.fifo_cfg);
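/* fifo_cfg layout as used by this driver: bit 0 enables the multi-FIFO
 * configuration and bits 4:2 select the FIFO.  nvi_int_able() keys the
 * per-FIFO interrupt enables off bit 0, and nvi_fifo_dev() uses bits 4:2
 * to find which device owns the FIFO being read.
 */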
716 static int nvi_wr_i2c_slv4_ctrl(struct nvi_state *st, bool slv4_en)
720 val = st->aux.delay_hw;
721 val |= (st->aux.port[AUX_PORT_IO].nmp.ctrl & BIT_I2C_SLV_REG_DIS);
724 return nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv4_ctrl, val,
725 __func__, &st->rc.i2c_slv4_ctrl);
728 static int nvi_rd_int_sts_dmp(struct nvi_state *st)
732 ret = nvi_i2c_rd(st, &st->hal->reg->int_dmp, &st->rc.int_dmp);
734 dev_err(&st->i2c->dev, "%s %x=ERR %d\n",
735 __func__, st->hal->reg->int_dmp.reg, ret);
739 static int nvi_rd_int_status(struct nvi_state *st)
741 u8 buf[4] = {0, 0, 0, 0};
746 ret = nvi_i2c_rd(st, &st->hal->reg->int_status, buf);
748 dev_err(&st->i2c->dev, "%s %x=ERR %d\n",
749 __func__, st->hal->reg->int_status.reg, ret);
751 /* convert to little endian */
752 st->rc.int_status = 0;
753 n = st->hal->reg->int_status.len;
756 for (i = 0; i < n; i++) {
757 st->rc.int_status <<= 8;
758 st->rc.int_status |= buf[i];
761 if (st->rc.int_status & (1 << st->hal->bit->int_dmp))
762 ret = nvi_rd_int_sts_dmp(st);
768 int nvi_int_able(struct nvi_state *st, const char *fn, bool en)
777 if (st->en_msk & (1 << DEV_DMP)) {
778 int_en |= 1 << st->hal->bit->int_dmp;
779 } else if (st->en_msk & MSK_DEV_ALL) {
780 int_msk = 1 << st->hal->bit->int_data_rdy_0;
781 if (st->rc.fifo_cfg & 0x01) {
782 /* multi FIFO enabled */
784 for (; fifo < st->hal->fifo_n; fifo++) {
785 dev = st->hal->fifo_dev[fifo];
789 if (st->rc.fifo_en & st->hal->
790 dev[dev]->fifo_en_msk)
791 int_en |= int_msk << fifo;
798 ret = nvi_i2c_write_rc(st, &st->hal->reg->int_enable, int_en,
799 __func__, (u8 *)&st->rc.int_enable, false);
800 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
801 dev_info(&st->i2c->dev, "%s-%s en=%x int_en=%x err=%d\n",
802 __func__, fn, en, int_en, ret);
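/* Interrupt enable policy: with the DMP running only the DMP interrupt is
 * armed; otherwise the data-ready interrupt is enabled, shifted per FIFO
 * when the multi-FIFO configuration is active so that each FIFO carrying
 * an enabled device gets its own data-ready bit.
 */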
806 static void nvi_flush_aux(struct nvi_state *st, int port)
808 struct aux_port *ap = &st->aux.port[port];
811 ap->nmp.handler(NULL, 0, 0, ap->nmp.ext_driver);
814 static void nvi_flush_push(struct nvi_state *st)
820 for (i = 0; i < DEV_N; i++) {
821 if (st->snsr[i].flush) {
822 ret = st->nvs->handler(st->snsr[i].nvs_st, NULL, 0LL);
824 st->snsr[i].flush = false;
827 for (i = 0; i < AUX_PORT_IO; i++) {
828 ap = &st->aux.port[i];
830 nvi_flush_aux(st, i);
835 static int nvi_user_ctrl_rst(struct nvi_state *st, u8 user_ctrl)
844 if (user_ctrl & BIT_SIG_COND_RST)
845 user_ctrl = BITS_USER_CTRL_RST;
846 if (user_ctrl & BIT_DMP_RST)
847 user_ctrl |= BIT_FIFO_RST;
848 if (user_ctrl & BIT_FIFO_RST) {
850 if (st->hal->reg->fifo_rst.reg) {
852 if (st->en_msk & (1 << DEV_DMP)) {
853 ret = nvi_wr_fifo_cfg(st,
854 st->hal->dmp->fifo_mode);
857 for (i = 0; i < DEV_AXIS_N; i++) {
858 if (st->hal->dev[i]->fifo_en_msk &&
863 msk = st->snsr[DEV_AUX].enable;
864 msk |= st->aux.dmp_en_msk;
865 if (st->hal->dev[DEV_AUX]->fifo_en_msk && msk)
868 ret = nvi_wr_fifo_cfg(st, 0);
870 ret = nvi_wr_fifo_cfg(st, -1);
872 if (st->en_msk & (1 << DEV_DMP))
876 ret |= nvi_i2c_wr(st, &st->hal->reg->fifo_rst,
878 ret |= nvi_i2c_wr(st, &st->hal->reg->fifo_rst,
884 if (user_ctrl == BIT_FIFO_RST)
888 user_ctrl &= ~BIT_FIFO_RST;
892 ret = nvi_i2c_wr(st, &st->hal->reg->user_ctrl, user_ctrl, __func__);
896 if (user_ctrl & BIT_FIFO_RST)
898 for (i = 0; i < POWER_UP_TIME; i++) {
900 ret = nvi_i2c_rd(st, &st->hal->reg->user_ctrl,
902 if (!(user_ctrl & BITS_USER_CTRL_RST))
908 st->rc.user_ctrl = user_ctrl;
909 if (user_ctrl & BIT_DMP_RST && st->hal->dmp) {
910 if (st->hal->dmp->dmp_reset_delay_ms)
911 msleep(st->hal->dmp->dmp_reset_delay_ms);
918 int nvi_user_ctrl_en(struct nvi_state *st, const char *fn,
919 bool en_dmp, bool en_fifo, bool en_i2c, bool en_irq)
927 if (!(st->en_msk & (1 << DEV_DMP)))
930 if (en_fifo && !en_dmp) {
931 for (i = 0; i < st->hal->src_n; i++)
932 st->src[i].fifo_data_n = 0;
934 for (i = 0; i < DEV_MPU_N; i++) {
935 if (st->snsr[i].enable &&
936 st->hal->dev[i]->fifo_en_msk) {
937 val |= st->hal->dev[i]->fifo_en_msk;
938 st->src[st->hal->dev[i]->src].fifo_data_n +=
939 st->hal->dev[i]->fifo_data_n;
940 st->fifo_src = st->hal->dev[i]->src;
944 if (st->hal->dev[DEV_AUX]->fifo_en_msk &&
945 st->snsr[DEV_AUX].enable) {
946 st->src[st->hal->dev[DEV_AUX]->src].fifo_data_n +=
948 st->fifo_src = st->hal->dev[DEV_AUX]->src;
949 for (i = 0; i < AUX_PORT_IO; i++) {
950 ap = &st->aux.port[i];
951 if (st->snsr[DEV_AUX].enable & (1 << i) &&
952 (ap->nmp.addr & BIT_I2C_READ) &&
955 st->hal->bit->slv_fifo_en[i]);
963 ret |= nvi_i2c_write_rc(st, &st->hal->reg->fifo_en, val,
964 __func__, (u8 *)&st->rc.fifo_en, false);
971 if (en_i2c && (st->en_msk & (1 << DEV_AUX)))
972 val |= BIT_I2C_MST_EN;
976 ret = nvi_int_able(st, __func__, true);
979 ret |= nvi_i2c_wr_rc(st, &st->hal->reg->user_ctrl, val,
980 __func__, &st->rc.user_ctrl);
982 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
983 dev_info(&st->i2c->dev,
984 "%s-%s DMP=%x FIFO=%x I2C=%x IRQ=%x err=%d\n",
985 __func__, fn, en_dmp, en_fifo, en_i2c, en_irq, ret);
989 int nvi_wr_pm1(struct nvi_state *st, const char *fn, u8 pm1)
995 if (pm1 & BIT_H_RESET) {
996 /* must make sure FIFO is off or IRQ storm will occur */
997 ret = nvi_int_able(st, __func__, false);
998 ret |= nvi_user_ctrl_en(st, __func__,
999 false, false, false, false);
1001 nvi_user_ctrl_rst(st, BITS_USER_CTRL_RST);
1002 ret = nvi_i2c_wr(st, &st->hal->reg->pm1,
1003 BIT_H_RESET, __func__);
1006 ret = nvi_i2c_wr_rc(st, &st->hal->reg->pm1, pm1,
1007 __func__, &st->rc.pm1);
1009 st->pm = NVI_PM_ERR;
1010 if (pm1 & BIT_H_RESET && !ret) {
1011 st->en_msk &= MSK_RST;
1012 nvi_rc_clr(st, __func__);
1014 for (i = 0; i < st->hal->src_n; i++)
1015 st->src[i].period_us_req = 0;
1017 for (i = 0; i < (POWER_UP_TIME / REG_UP_TIME); i++) {
1018 mdelay(REG_UP_TIME);
1020 ret = nvi_i2c_rd(st, &st->hal->reg->pm1, &pm1_rd);
1021 if ((!ret) && (!(pm1_rd & BIT_H_RESET)))
1026 nvi_rd_accel_offset(st);
1027 nvi_rd_gyro_offset(st);
1030 if (st->sts & NVI_DBG_SPEW_MSG)
1031 dev_info(&st->i2c->dev, "%s-%s pm1=%x err=%d\n",
1032 __func__, fn, pm1, ret);
1036 static int nvi_pm_w(struct nvi_state *st, u8 pm1, u8 pm2, u8 lp)
1039 unsigned int delay_ms;
1043 ret = nvs_vregs_enable(&st->i2c->dev, st->vreg, ARRAY_SIZE(nvi_vregs));
1046 for (i = 0; i < ARRAY_SIZE(nvi_vregs); i++) {
1047 por_ns = nvs_timestamp() - st->ts_vreg_en[i];
1048 if ((por_ns < 0) || (!st->ts_vreg_en[i])) {
1049 delay_ms = (POR_MS * 1000000);
1053 if (por_ns < (POR_MS * 1000000)) {
1054 por_ns = (POR_MS * 1000000) - por_ns;
1055 if (por_ns > delay_ms)
1056 delay_ms = (unsigned int)por_ns;
1059 delay_ms /= 1000000;
1060 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1061 dev_info(&st->i2c->dev, "%s %ums delay\n",
1062 __func__, delay_ms);
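/* Power-on-reset (POR) wait, illustrated: if vdd was enabled 30 ms ago
 * and POR_MS were 100, the remaining 70 ms is still waited here (delay_ms
 * is accumulated in nanoseconds above and converted just before use);
 * if a regulator's enable timestamp is unknown, the full POR_MS is waited
 * before the device registers are touched.
 */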
1065 ret = nvi_wr_pm1(st, __func__, BIT_H_RESET);
1067 ret |= st->hal->fn->pm(st, pm1, pm2, lp);
1071 int nvi_pm_wr(struct nvi_state *st, const char *fn, u8 pm1, u8 pm2, u8 lp)
1075 ret = nvi_pm_w(st, pm1, pm2, lp);
1076 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1077 dev_info(&st->i2c->dev, "%s-%s PM1=%x PM2=%x LPA=%x err=%d\n",
1078 __func__, fn, pm1, pm2, lp, ret);
1079 st->pm = NVI_PM_ERR; /* lost st->pm status: nvi_pm is being bypassed */
/*
 * @param pm_req: call with one of the following:
 *	NVI_PM_OFF_FORCE = force off state
 *	NVI_PM_ON = minimum power for device access
 *	NVI_PM_ON_FULL = power for gyro
 *	NVI_PM_AUTO = automatically sets power after configuration,
 *		      based on what is enabled
 * Typical use is to set needed power for configuration and
 * then call with NVI_PM_AUTO when done.  All other NVI_PM_
 * levels are handled automatically and are for internal use
 * only.
 * @return int: returns 0 for success or error code
 */
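/* Typical sequence (as used by nvi_mpu_dev_valid() and the other exported
 * calls below):
 *	nvi_pm(st, __func__, NVI_PM_ON);
 *	... program registers ...
 *	nvi_pm(st, __func__, NVI_PM_AUTO);
 */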
1097 static int nvi_pm(struct nvi_state *st, const char *fn, int pm_req)
1106 lp = st->rc.lp_config;
1107 if (pm_req == NVI_PM_AUTO) {
1109 if (!(st->en_msk & MSK_PM_ACC_EN))
1110 pm2 |= BIT_PWR_ACCEL_STBY;
1111 if (!st->snsr[DEV_GYR].enable)
1112 pm2 |= BIT_PWR_GYRO_STBY;
1113 if (st->en_msk & MSK_PM_ON_FULL) {
1114 pm = NVI_PM_ON_FULL;
1115 } else if (st->en_msk & MSK_PM_ON) {
1117 } else if ((st->en_msk & ((1 << EN_LP) |
1118 MSK_DEV_ALL)) == MSK_PM_LP) {
1119 if (st->snsr[DEV_ACC].period_us >=
1120 st->snsr[DEV_ACC].cfg.thresh_hi) {
1121 for (lp = 0; lp < st->hal->lp_tbl_n; lp++) {
1122 if (st->snsr[DEV_ACC].period_us >=
1123 st->hal->lp_tbl[lp])
1126 pm = NVI_PM_ON_CYCLE;
1130 } else if (st->en_msk & MSK_PM_LP) {
1132 } else if (st->en_msk & MSK_PM_STDBY || st->aux.bypass_lock) {
1139 if ((pm_req > NVI_PM_STDBY) && (pm_req < st->pm))
1144 if (pm == NVI_PM_OFF) {
1145 for (i = 0; i < AUX_PORT_IO; i++) {
1146 if (st->aux.port[i].nmp.shutdown_bypass) {
1147 nvi_aux_bypass_enable(st, true);
1152 if (st->en_msk & (1 << FW_LOADED))
1157 case NVI_PM_OFF_FORCE:
1162 pm2 = (BIT_PWR_ACCEL_STBY | BIT_PWR_GYRO_STBY);
1165 case NVI_PM_ON_CYCLE:
1167 pm2 &= ~BIT_PWR_ACCEL_STBY;
1171 pm1 = INV_CLK_INTERNAL;
1172 if (pm2 & BIT_PWR_ACCEL_STBY) {
1173 for (i = 0; i < DEV_N_AUX; i++) {
1174 if (MSK_PM_ACC_EN & (1 << i)) {
1175 if (st->snsr[i].enable) {
1176 pm2 &= ~BIT_PWR_ACCEL_STBY;
1185 case NVI_PM_ON_FULL:
1187 /* gyro must be turned on before going to PLL clock */
1188 pm2 &= ~BIT_PWR_GYRO_STBY;
1192 dev_err(&st->i2c->dev, "%s %d=>%d ERR=EINVAL\n",
1193 __func__, st->pm, pm);
1197 if (pm != st->pm || lp != st->rc.lp_config || pm2 != (st->rc.pm2 &
1198 (BIT_PWR_ACCEL_STBY | BIT_PWR_GYRO_STBY))) {
1199 if (pm == NVI_PM_OFF) {
1200 if (st->pm > NVI_PM_OFF || st->pm == NVI_PM_ERR)
1201 ret |= nvi_wr_pm1(st, __func__, BIT_H_RESET);
1202 ret |= nvi_pm_w(st, pm1, pm2, lp);
1203 ret |= nvs_vregs_disable(&st->i2c->dev, st->vreg,
1204 ARRAY_SIZE(nvi_vregs));
1206 if (pm == NVI_PM_ON_CYCLE)
1207 /* last chance to write to regs before cycle */
1208 ret |= nvi_int_able(st, __func__, true);
1209 ret |= nvi_pm_w(st, pm1, pm2, lp);
1210 if (pm > NVI_PM_STDBY)
1211 mdelay(REG_UP_TIME);
1214 dev_err(&st->i2c->dev, "%s PM %d=>%d ERR=%d\n",
1215 __func__, st->pm, pm, ret);
1218 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1219 dev_info(&st->i2c->dev,
1220 "%s-%s PM %d=>%d PM1=%x PM2=%x LP=%x\n",
1221 __func__, fn, st->pm, pm, pm1, pm2, lp);
1229 static void nvi_pm_exit(struct nvi_state *st)
1232 nvi_pm(st, __func__, NVI_PM_OFF_FORCE);
1233 nvs_vregs_exit(&st->i2c->dev, st->vreg, ARRAY_SIZE(nvi_vregs));
1236 static int nvi_pm_init(struct nvi_state *st)
1240 ret = nvs_vregs_init(&st->i2c->dev,
1241 st->vreg, ARRAY_SIZE(nvi_vregs), nvi_vregs);
1242 st->pm = NVI_PM_ERR;
1246 static int nvi_dmp_fw(struct nvi_state *st)
1248 #if NVI_FW_CRC_CHECK
1250 #endif /* NVI_FW_CRC_CHECK */
1253 st->icm_dmp_war = false;
1257 #if NVI_FW_CRC_CHECK
1258 crc32 = crc32(0, st->hal->dmp->fw, st->hal->dmp->fw_len);
1259 if (crc32 != st->hal->dmp->fw_crc32) {
1260 dev_err(&st->i2c->dev, "%s FW CRC FAIL %x != %x\n",
1261 __func__, crc32, st->hal->dmp->fw_crc32);
1264 #endif /* NVI_FW_CRC_CHECK */
1266 ret = nvi_user_ctrl_en(st, __func__, false, false, false, false);
1270 ret = nvi_mem_wr(st, st->hal->dmp->fw_mem_addr,
1271 st->hal->dmp->fw_len,
1272 (u8 *)st->hal->dmp->fw, true);
1274 dev_err(&st->i2c->dev, "%s ERR: nvi_mem_wr\n", __func__);
1278 ret = nvi_i2c_write_rc(st, &st->hal->reg->fw_start,
1279 st->hal->dmp->fw_start,
1280 __func__, NULL, true);
1284 ret = st->hal->dmp->fn_init(st); /* nvi_dmp_init */
1286 dev_err(&st->i2c->dev, "%s ERR: nvi_dmp_init\n", __func__);
1290 nvi_user_ctrl_en(st, __func__, false, false, false, false);
1291 st->en_msk |= (1 << FW_LOADED);
1295 void nvi_push_delay(struct nvi_state *st)
1299 for (i = 0; i < DEV_MPU_N; i++) {
1300 if (st->snsr[i].enable) {
1301 if (st->snsr[i].push_delay_ns &&
1302 !st->snsr[i].ts_push_delay)
1303 st->snsr[i].ts_push_delay = nvs_timestamp() +
1304 st->snsr[i].push_delay_ns;
1306 st->snsr[i].ts_push_delay = 0;
1311 int nvi_aux_delay(struct nvi_state *st, const char *fn)
1314 unsigned int msk_en;
1315 unsigned int src_us;
1320 /* determine valid delays by ports enabled */
1322 msk_en = st->snsr[DEV_AUX].enable | st->aux.dmp_en_msk;
1323 for (i = 0; msk_en; i++) {
1324 if (msk_en & (1 << i)) {
1325 msk_en &= ~(1 << i);
1326 if (delay < st->aux.port[i].nmp.delay_ms)
1327 delay = st->aux.port[i].nmp.delay_ms;
1330 src_us = st->src[st->hal->dev[DEV_AUX]->src].period_us_src;
1332 delay *= 1000; /* ms => us */
1333 if (delay % src_us) {
1343 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1344 dev_info(&st->i2c->dev, "%s-%s aux.delay_hw=%u=>%u\n",
1345 __func__, fn, st->aux.delay_hw, delay);
1346 st->aux.delay_hw = delay;
1347 ret = nvi_wr_i2c_slv4_ctrl(st, (bool)
1348 (st->rc.i2c_slv4_ctrl & BIT_SLV_EN));
1349 /* HW port delay enable */
1350 val = BIT_DELAY_ES_SHADOW;
1351 for (i = 0; i < AUX_PORT_MAX; i++) {
1352 if (st->aux.port[i].nmp.delay_ms)
1355 ret |= nvi_i2c_wr_rc(st, &st->hal->reg->i2c_mst_delay_ctrl, val,
1356 __func__, &st->rc.i2c_mst_delay_ctrl);
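/* AUX delay handling above: the slowest delay_ms requested among the
 * enabled AUX ports is converted to the I2C master delay divider
 * (aux.delay_hw) relative to the AUX source period, and
 * BIT_DELAY_ES_SHADOW plus a per-port delay-enable bit is set for every
 * port that asked for a delay, so slow slaves are accessed only once per
 * several samples of the AUX source rate rather than on every sample.
 */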
1360 static int nvi_timeout(struct nvi_state *st)
1362 bool disabled = true;
1363 unsigned int timeout_us = -1;
1366 /* find the fastest batch timeout of all the enabled devices */
1367 for (i = 0; i < DEV_N_AUX; i++) {
1368 if (st->snsr[i].enable) {
1369 if (st->snsr[i].timeout_us < timeout_us)
1370 timeout_us = st->snsr[i].timeout_us;
1375 disabled = true; /* batch mode is currently disabled */
1377 timeout_us = 0; /* batch mode disabled */
1378 if (timeout_us != st->bm_timeout_us) {
1379 st->bm_timeout_us = timeout_us;
1386 static int nvi_period_src(struct nvi_state *st, int src)
1388 bool enabled = false;
1389 unsigned int period_us = -1;
1390 unsigned int dev_msk;
1396 /* find the fastest period of all the enabled devices */
1397 dev_msk = st->hal->src[src].dev_msk;
1398 for (i = 0; dev_msk; i++) {
1399 if (dev_msk & (1 << i)) {
1400 dev_msk &= ~(1 << i);
1401 if (st->snsr[i].enable && st->snsr[i].period_us) {
1402 if (st->snsr[i].period_us < period_us)
1403 period_us = st->snsr[i].period_us;
1410 if (period_us < st->hal->src[src].period_us_min)
1411 period_us = st->hal->src[src].period_us_min;
1412 if (period_us > st->hal->src[src].period_us_max)
1413 period_us = st->hal->src[src].period_us_max;
1414 if (period_us != st->src[src].period_us_req) {
1415 st->src[src].period_us_req = period_us;
1423 int nvi_period_aux(struct nvi_state *st)
1425 bool enabled = false;
1426 unsigned int period_us = -1;
1427 unsigned int timeout_us = -1;
1428 unsigned int msk_en;
1432 msk_en = st->snsr[DEV_AUX].enable | st->aux.dmp_en_msk;
1433 for (i = 0; msk_en; i++) {
1434 if (msk_en & (1 << i)) {
1435 msk_en &= ~(1 << i);
1436 if (st->aux.port[i].period_us) {
1437 if (st->aux.port[i].period_us < period_us)
1438 period_us = st->aux.port[i].period_us;
1439 if (st->aux.port[i].timeout_us < timeout_us)
1441 st->aux.port[i].timeout_us;
1448 st->snsr[DEV_AUX].period_us = period_us;
1449 st->snsr[DEV_AUX].timeout_us = timeout_us;
1451 ret = nvi_period_src(st, st->hal->dev[DEV_AUX]->src);
1452 ret |= nvi_timeout(st);
1456 static int nvi_period_all(struct nvi_state *st)
1461 for (src = 0; src < st->hal->src_n; src++) {
1462 if (st->hal->src[src].dev_msk & (1 << DEV_AUX))
1463 continue; /* run nvi_period_aux last for timeout */
1465 ret |= nvi_period_src(st, src);
1468 ret |= nvi_period_aux(st);
1472 static int nvi_en(struct nvi_state *st)
1474 bool dmp_en = false;
1480 if (st->snsr[DEV_GYR].enable) {
1481 ret_t = nvi_pm(st, __func__, NVI_PM_ON_FULL);
1485 for (i = 0; i < DEV_N_AUX; i++) {
1486 if (st->snsr[i].enable) {
1487 ret_t = nvi_pm(st, __func__, NVI_PM_ON);
1494 ret_t = nvi_pm(st, __func__, NVI_PM_AUTO);
1495 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1496 dev_info(&st->i2c->dev, "%s en_msk=%x err=%d\n",
1497 __func__, st->en_msk, ret_t);
1501 ret_t |= nvi_int_able(st, __func__, false);
1502 ret_t |= nvi_user_ctrl_en(st, __func__, false, false, false, false);
1504 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1505 dev_err(&st->i2c->dev, "%s en_msk=%x ERR=%d\n",
1506 __func__, st->en_msk, ret_t);
1510 if (st->en_msk & (1 << FW_LOADED)) {
/* test if batch is needed or more specifically that an
 * enabled sensor doesn't support batch.  The DMP can't
 * do batch and non-batch at the same time.
 */
1515 if (st->bm_timeout_us) {
1518 /* batch disabled - test if a DMP sensor is enabled */
1519 for (i = 0; i < DEV_N_AUX; i++) {
1520 if (st->dmp_en_msk & (1 << i)) {
1521 if (st->snsr[i].enable) {
1530 ret_t |= st->hal->dmp->fn_en(st); /* nvi_dmp_en */
1531 st->en_msk |= (1 << DEV_DMP);
1533 /* reprogram for non-DMP mode below */
1535 if (st->sts & (NVS_STS_SPEW_MSG |
1537 dev_err(&st->i2c->dev,
1541 if (st->sts & (NVS_STS_SPEW_MSG |
1543 dev_info(&st->i2c->dev,
1544 "%s DMP enabled\n", __func__);
1549 if (st->en_msk & (1 << DEV_DMP)) {
1550 st->en_msk &= ~(MSK_DEV_SNSR | (1 << DEV_DMP));
1551 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1552 dev_info(&st->i2c->dev,
1553 "%s DMP disabled\n", __func__);
1554 if (st->aux.dmp_en_msk) {
1555 st->aux.dmp_en_msk = 0;
1556 nvi_aux_enable(st, __func__, true, true);
1558 for (i = 0; i < DEV_N_AUX; i++)
1559 st->snsr[i].odr = 0;
1561 for (i = 0; i < AUX_PORT_MAX; i++)
1562 st->aux.port[i].odr = 0;
1565 for (i = 0; i < st->hal->src_n; i++)
1566 ret_t |= st->hal->src[i].fn_period(st);
1568 if (st->snsr[DEV_ACC].enable) {
1569 ret = st->hal->fn->en_acc(st);
1572 st->en_msk &= ~(1 << DEV_ACC);
1574 st->en_msk |= (1 << DEV_ACC);
1577 if (st->snsr[DEV_GYR].enable) {
1578 ret = st->hal->fn->en_gyr(st);
1581 st->en_msk &= ~(1 << DEV_GYR);
1583 st->en_msk |= (1 << DEV_GYR);
1587 /* NVI_PM_AUTO to go to NVI_PM_ON_CYCLE if need be */
1588 /* this also restores correct PM mode if error */
1589 ret_t |= nvi_pm(st, __func__, NVI_PM_AUTO);
1590 if (st->pm > NVI_PM_ON_CYCLE)
1591 ret_t |= nvi_reset(st, __func__, true, false, true);
1593 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1594 dev_info(&st->i2c->dev, "%s en_msk=%x err=%d\n",
1595 __func__, st->en_msk, ret_t);
1599 static void nvi_aux_dbg(struct nvi_state *st, char *tag, int val)
1601 struct nvi_mpu_port *n;
1603 struct aux_ports *a;
1608 if (!(st->sts & NVI_DBG_SPEW_AUX))
1611 dev_info(&st->i2c->dev, "%s %s %d\n", __func__, tag, val);
1613 for (i = 0; i < AUX_PORT_IO; i++) {
1614 ret = nvi_i2c_rd(st, &st->hal->reg->i2c_slv_addr[i], &data[0]);
1615 ret |= nvi_i2c_rd(st, &st->hal->reg->i2c_slv_reg[i], &data[1]);
1616 ret |= nvi_i2c_rd(st, &st->hal->reg->i2c_slv_ctrl[i],
1618 ret |= nvi_i2c_rd(st, &st->hal->reg->i2c_slv_do[i], &data[3]);
1621 pr_info("HW: ERR=%d\n", ret);
1623 pr_info("HW: P%d AD=%x RG=%x CL=%x DO=%x\n",
1624 i, data[0], data[1], data[2], data[3]);
1625 /* RC = hardware register cache */
1626 pr_info("HC: P%d AD=%x RG=%x CL=%x DO=%x\n",
1627 i, st->rc.i2c_slv_addr[i], st->rc.i2c_slv_reg[i],
1628 st->rc.i2c_slv_ctrl[i], st->rc.i2c_slv_do[i]);
1629 n = &st->aux.port[i].nmp;
1630 /* NS = nmp structure */
1631 pr_info("NS: P%d AD=%x RG=%x CL=%x DO=%x MS=%u US=%u SB=%x\n",
1632 i, n->addr, n->reg, n->ctrl, n->data_out, n->delay_ms,
1633 st->aux.port[i].period_us, n->shutdown_bypass);
1634 p = &st->aux.port[i];
1635 /* PS = port structure */
1636 pr_info("PS: P%d OFFSET=%u DMP_CTRL=%x EN=%x HWDOUT=%x\n",
1637 i, p->ext_data_offset, !!(a->dmp_ctrl_msk & (1 << i)),
1638 !!(st->snsr[DEV_AUX].enable & (1 << i)), p->hw_do);
1641 pr_info("AUX: EN=%x MEN=%x DEN=%x DLY=%x SRC=%u DN=%u BEN=%x BLK=%d\n",
1642 !!(st->en_msk & (1 << DEV_AUX)),
1643 !!(st->rc.user_ctrl & BIT_I2C_MST_EN), st->aux.dmp_en_msk,
1644 (st->rc.i2c_slv4_ctrl & BITS_I2C_MST_DLY),
1645 st->src[st->hal->dev[DEV_AUX]->src].period_us_src,
1646 a->ext_data_n, (st->rc.int_pin_cfg & BIT_BYPASS_EN),
1650 static void nvi_aux_ext_data_offset(struct nvi_state *st)
1653 unsigned int offset = 0;
1655 for (i = 0; i < AUX_PORT_IO; i++) {
1656 if (st->aux.port[i].nmp.addr & BIT_I2C_READ) {
1657 st->aux.port[i].ext_data_offset = offset;
1658 offset += (st->rc.i2c_slv_ctrl[i] &
1659 BITS_I2C_SLV_CTRL_LEN);
1662 if (offset > AUX_EXT_DATA_REG_MAX) {
1663 offset = AUX_EXT_DATA_REG_MAX;
1664 dev_err(&st->i2c->dev,
1665 "%s ERR MPU slaves exceed data storage\n", __func__);
1667 st->aux.ext_data_n = offset;
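/* Data read from the AUX slave ports is packed back-to-back into the
 * EXT_SENS_DATA registers in port order; ext_data_offset records where
 * each read port's bytes start so nvi_aux_rd() can hand the right slice
 * to that port's handler, and ext_data_n is the total number of bytes to
 * read (clamped to AUX_EXT_DATA_REG_MAX).
 */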
1671 static int nvi_aux_port_data_out(struct nvi_state *st,
1672 int port, u8 data_out)
1676 ret = nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv_do[port], data_out,
1677 NULL, &st->rc.i2c_slv_do[port]);
1679 st->aux.port[port].nmp.data_out = data_out;
1680 st->aux.port[port].hw_do = true;
1682 st->aux.port[port].hw_do = false;
1687 static int nvi_aux_port_wr(struct nvi_state *st, int port)
1689 struct aux_port *ap;
1692 ap = &st->aux.port[port];
1693 ret = nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv_addr[port],
1694 ap->nmp.addr, __func__, &st->rc.i2c_slv_addr[port]);
1695 ret |= nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv_reg[port], ap->nmp.reg,
1696 __func__, &st->rc.i2c_slv_reg[port]);
1697 ret |= nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv_do[port],
1698 ap->nmp.data_out, __func__, &st->rc.i2c_slv_do[port]);
1702 static int nvi_aux_port_en(struct nvi_state *st, int port, bool en)
1704 struct aux_port *ap;
1707 unsigned int dmp_ctrl_msk;
1710 ap = &st->aux.port[port];
1711 if (en && !st->rc.i2c_slv_addr[port]) {
1712 ret = nvi_aux_port_wr(st, port);
1716 if (en && !ap->hw_do)
1717 nvi_aux_port_data_out(st, port, ap->nmp.data_out);
1718 if (port == AUX_PORT_IO) {
1719 ret = nvi_wr_i2c_slv4_ctrl(st, en);
1721 slv_ctrl = st->rc.i2c_slv_ctrl[port];
1723 dmp_ctrl_msk = st->aux.dmp_ctrl_msk;
1724 if (st->en_msk & (1 << DEV_DMP)) {
1725 val = ap->nmp.dmp_ctrl | BIT_SLV_EN;
1726 st->aux.dmp_ctrl_msk |= (1 << port);
1728 val = ap->nmp.ctrl | BIT_SLV_EN;
1729 st->aux.dmp_ctrl_msk &= ~(1 << port);
1731 if (ap->nmp.dmp_ctrl != ap->nmp.ctrl && dmp_ctrl_msk !=
1732 st->aux.dmp_ctrl_msk)
/* AUX HW needs to be reset if slv_ctrl values
 * change other than enable bit.
 */
1736 st->aux.reset_i2c = true;
1739 st->aux.dmp_ctrl_msk &= ~(1 << port);
1741 ret = nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv_ctrl[port], val,
1742 __func__, &st->rc.i2c_slv_ctrl[port]);
1743 if (slv_ctrl != st->rc.i2c_slv_ctrl[port])
1744 nvi_aux_ext_data_offset(st);
1749 int nvi_aux_enable(struct nvi_state *st, const char *fn,
1750 bool en_req, bool force)
1752 bool enable = en_req;
1753 bool enabled = false;
1755 unsigned int msk_en;
1759 if (st->rc.int_pin_cfg & BIT_BYPASS_EN)
1761 /* global enable is honored only if a port is enabled */
1762 msk_en = st->snsr[DEV_AUX].enable | st->aux.dmp_en_msk;
1765 if (st->en_msk & (1 << DEV_AUX))
1767 if (force || enable != enabled) {
1769 st->en_msk |= (1 << DEV_AUX);
1770 for (i = 0; i < AUX_PORT_MAX; i++) {
1771 if (msk_en & (1 << i))
1775 ret |= nvi_aux_port_en(st, i, en);
1778 st->en_msk &= ~(1 << DEV_AUX);
1779 for (i = 0; i < AUX_PORT_MAX; i++) {
1780 if (st->rc.i2c_slv_addr[i])
1781 nvi_aux_port_en(st, i, false);
1784 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG |
1786 dev_info(&st->i2c->dev,
1787 "%s-%s en_req=%x enabled: %x->%x err=%d\n",
1788 __func__, fn, en_req, enabled, enable, ret);
1793 static int nvi_aux_port_enable(struct nvi_state *st,
1794 unsigned int port_mask, bool en)
1796 unsigned int enabled;
1800 enabled = st->snsr[DEV_AUX].enable;
1802 st->snsr[DEV_AUX].enable |= port_mask;
1804 st->snsr[DEV_AUX].enable &= ~port_mask;
1805 if (enabled == st->snsr[DEV_AUX].enable)
1808 if (st->hal->dev[DEV_AUX]->fifo_en_msk) {
1810 for (i = 0; i < AUX_PORT_IO; i++) {
1811 if (port_mask & (1 << i)) {
1812 if (st->aux.port[i].nmp.addr & BIT_I2C_READ)
1813 st->aux.reset_fifo = true;
1817 if (en && (st->rc.int_pin_cfg & BIT_BYPASS_EN))
1821 for (i = 0; i < AUX_PORT_MAX; i++) {
1822 if (port_mask & (1 << i))
1823 ret |= nvi_aux_port_en(st, i, en);
1825 ret |= nvi_aux_enable(st, __func__, true, false);
1827 if (port_mask & ((1 << AUX_PORT_IO) - 1))
1832 static int nvi_aux_port_free(struct nvi_state *st, int port)
1834 memset(&st->aux.port[port], 0, sizeof(struct aux_port));
1835 st->snsr[DEV_AUX].enable &= ~(1 << port);
1836 st->aux.dmp_en_msk &= ~(1 << port);
1837 if (st->rc.i2c_slv_addr[port]) {
1838 nvi_aux_port_wr(st, port);
1839 nvi_aux_port_en(st, port, false);
1840 nvi_aux_enable(st, __func__, false, false);
1841 nvi_user_ctrl_en(st, __func__, false, false, false, false);
1842 nvi_aux_enable(st, __func__, true, false);
1843 if (port != AUX_PORT_IO)
1844 st->aux.reset_i2c = true;
1851 static int nvi_aux_port_alloc(struct nvi_state *st,
1852 struct nvi_mpu_port *nmp, int port)
1856 if (st->aux.reset_i2c)
1857 nvi_reset(st, __func__, false, true, true);
1859 for (i = 0; i < AUX_PORT_IO; i++) {
1860 if (st->aux.port[i].nmp.addr == 0)
1863 if (i == AUX_PORT_IO)
1866 if (st->aux.port[port].nmp.addr == 0)
1872 memset(&st->aux.port[i], 0, sizeof(struct aux_port));
1873 memcpy(&st->aux.port[i].nmp, nmp, sizeof(struct nvi_mpu_port));
1874 if (!st->aux.port[i].nmp.dmp_ctrl)
1875 st->aux.port[i].nmp.dmp_ctrl = st->aux.port[i].nmp.ctrl;
1876 st->aux.port[i].period_us = st->aux.port[i].nmp.delay_us;
1880 static int nvi_aux_bypass_enable(struct nvi_state *st, bool en)
1885 if (en && (st->rc.int_pin_cfg & BIT_BYPASS_EN))
1888 val = st->rc.int_pin_cfg;
1890 ret = nvi_aux_enable(st, __func__, false, false);
1891 ret |= nvi_user_ctrl_en(st, __func__,
1892 false, false, false, false);
1894 val |= BIT_BYPASS_EN;
1895 ret = nvi_i2c_wr_rc(st, &st->hal->reg->int_pin_cfg,
1896 val, __func__, &st->rc.int_pin_cfg);
1899 val &= ~BIT_BYPASS_EN;
1900 ret = nvi_i2c_wr_rc(st, &st->hal->reg->int_pin_cfg, val,
1901 __func__, &st->rc.int_pin_cfg);
1903 nvi_aux_enable(st, __func__, true, false);
1910 static int nvi_aux_bypass_request(struct nvi_state *st, bool enable)
1916 if ((bool)(st->rc.int_pin_cfg & BIT_BYPASS_EN) == enable) {
1917 st->aux.bypass_timeout_ns = nvs_timestamp();
1918 st->aux.bypass_lock++;
1919 if (!st->aux.bypass_lock)
1920 dev_err(&st->i2c->dev, "%s rollover ERR\n", __func__);
1922 if (st->aux.bypass_lock) {
1923 ns = nvs_timestamp() - st->aux.bypass_timeout_ns;
1924 to = st->bypass_timeout_ms;
1927 st->aux.bypass_lock = 0;
1931 if (!st->aux.bypass_lock) {
1932 ret = nvi_aux_bypass_enable(st, enable);
1934 dev_err(&st->i2c->dev, "%s ERR=%d\n",
1937 st->aux.bypass_lock++;
1943 static int nvi_aux_bypass_release(struct nvi_state *st)
1947 if (st->aux.bypass_lock)
1948 st->aux.bypass_lock--;
1949 if (!st->aux.bypass_lock) {
1950 ret = nvi_aux_bypass_enable(st, false);
1952 dev_err(&st->i2c->dev, "%s ERR=%d\n", __func__, ret);
1957 static int nvi_aux_dev_valid(struct nvi_state *st,
1958 struct nvi_mpu_port *nmp, u8 *data)
1964 /* turn off bypass */
1965 ret = nvi_aux_bypass_request(st, false);
1969 /* grab the special port */
1970 ret = nvi_aux_port_alloc(st, nmp, AUX_PORT_IO);
1971 if (ret != AUX_PORT_IO) {
1972 nvi_aux_bypass_release(st);
1976 /* enable it at fastest speed */
1977 st->aux.port[AUX_PORT_IO].nmp.delay_ms = 0;
1978 st->aux.port[AUX_PORT_IO].period_us =
1979 st->hal->src[st->hal->dev[DEV_AUX]->src].period_us_min;
1980 ret = nvi_user_ctrl_en(st, __func__, false, false, false, false);
1981 ret |= nvi_aux_port_enable(st, 1 << AUX_PORT_IO, true);
1982 ret |= nvi_user_ctrl_en(st, __func__, false, false, true, false);
1984 nvi_aux_port_free(st, AUX_PORT_IO);
1985 nvi_aux_bypass_release(st);
1989 /* now turn off all the other ports for fastest response */
1990 for (i = 0; i < AUX_PORT_IO; i++) {
1991 if (st->rc.i2c_slv_addr[i])
1992 nvi_aux_port_en(st, i, false);
1994 /* start reading the results */
1995 for (i = 0; i < AUX_DEV_VALID_READ_LOOP_MAX; i++) {
1996 mdelay(AUX_DEV_VALID_READ_DELAY_MS);
1998 ret = nvi_i2c_rd(st, &st->hal->reg->i2c_mst_status, &val);
2005 /* these will restore all previously disabled ports */
2006 nvi_aux_bypass_release(st);
2007 nvi_aux_port_free(st, AUX_PORT_IO);
2008 if (i >= AUX_DEV_VALID_READ_LOOP_MAX)
2011 if (val & 0x10) /* NACK */
2014 if (nmp->addr & BIT_I2C_READ) {
2015 ret = nvi_i2c_rd(st, &st->hal->reg->i2c_slv4_di, &val);
2020 dev_info(&st->i2c->dev, "%s MPU read 0x%x from device 0x%x\n",
2021 __func__, val, (nmp->addr & ~BIT_I2C_READ));
2023 dev_info(&st->i2c->dev, "%s MPU found device 0x%x\n",
2024 __func__, (nmp->addr & ~BIT_I2C_READ));
2029 static int nvi_aux_mpu_call_pre(struct nvi_state *st, int port)
2031 if ((port < 0) || (port >= AUX_PORT_IO))
2034 if (st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND))
2037 if (!st->aux.port[port].nmp.addr)
2043 static int nvi_aux_mpu_call_post(struct nvi_state *st,
2048 nvi_aux_dbg(st, tag, ret);
/* See the mpu.h file for details on the nvi_mpu_ calls. */
2054 int nvi_mpu_dev_valid(struct nvi_mpu_port *nmp, u8 *data)
2056 struct nvi_state *st = nvi_state_local;
2060 if (st->sts & NVI_DBG_SPEW_AUX)
2061 pr_info("%s\n", __func__);
2063 pr_debug("%s ERR -EAGAIN\n", __func__);
2070 if ((nmp->addr & BIT_I2C_READ) && (data == NULL))
2074 if (!(st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND))) {
2075 nvi_pm(st, __func__, NVI_PM_ON);
2076 ret = nvi_aux_dev_valid(st, nmp, data);
2077 nvi_pm(st, __func__, NVI_PM_AUTO);
2078 nvi_aux_dbg(st, "nvi_mpu_dev_valid=", ret);
2080 nvi_mutex_unlock(st);
2083 EXPORT_SYMBOL(nvi_mpu_dev_valid);
2085 int nvi_mpu_port_alloc(struct nvi_mpu_port *nmp, int port)
2087 struct nvi_state *st = nvi_state_local;
2091 if (st->sts & NVI_DBG_SPEW_AUX)
2092 pr_info("%s\n", __func__);
2094 pr_debug("%s ERR -EAGAIN\n", __func__);
2098 if (nmp == NULL || !(nmp->ctrl & BITS_I2C_SLV_CTRL_LEN))
2101 if (port >= AUX_PORT_IO)
2105 if (!(st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND))) {
2106 nvi_pm(st, __func__, NVI_PM_ON);
2107 ret = nvi_aux_port_alloc(st, nmp, port);
2108 if (ret >= 0 && st->hal->dmp)
2109 /* need to reinitialize DMP for new device */
2110 st->hal->dmp->fn_init(st);
2111 nvi_pm(st, __func__, NVI_PM_AUTO);
2112 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_port_alloc=", ret);
2114 nvi_mutex_unlock(st);
2117 EXPORT_SYMBOL(nvi_mpu_port_alloc);
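/* Illustrative sketch (not from mpu.h) of how an external slave-device
 * driver might allocate and enable a read port; the address, register,
 * length and my_handler/my_data names are placeholders, not values from
 * any real device:
 *
 *	struct nvi_mpu_port nmp = {
 *		.addr = 0x0C | BIT_I2C_READ, // slave address + read flag
 *		.reg = 0x03,                 // slave register to read from
 *		.ctrl = 6,                   // transfer length in ctrl bits
 *		.delay_ms = 10,
 *		.handler = my_handler,       // called with the read data
 *		.ext_driver = my_data,
 *	};
 *	int port = nvi_mpu_port_alloc(&nmp, -1);
 *
 *	if (port >= 0)
 *		nvi_mpu_enable(1 << port, true);
 */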
2119 int nvi_mpu_port_free(int port)
2121 struct nvi_state *st = nvi_state_local;
2125 if (st->sts & NVI_DBG_SPEW_AUX)
2126 pr_info("%s port %d\n", __func__, port);
2128 pr_debug("%s port %d ERR -EAGAIN\n", __func__, port);
2133 ret = nvi_aux_mpu_call_pre(st, port);
2135 nvi_pm(st, __func__, NVI_PM_ON);
2136 ret = nvi_aux_port_free(st, port);
2137 nvi_pm(st, __func__, NVI_PM_AUTO);
2138 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_port_free=", ret);
2140 nvi_mutex_unlock(st);
2143 EXPORT_SYMBOL(nvi_mpu_port_free);
2145 int nvi_mpu_enable(unsigned int port_mask, bool enable)
2147 struct nvi_state *st = nvi_state_local;
2152 if (st->sts & NVI_DBG_SPEW_AUX)
2153 pr_info("%s port_mask %x: %x\n",
2154 __func__, port_mask, enable);
2156 pr_debug("%s port_mask %x: %x ERR -EAGAIN\n",
2157 __func__, port_mask, enable);
2161 if (port_mask >= (1 << AUX_PORT_IO) || !port_mask)
2164 for (i = 0; i < AUX_PORT_IO; i++) {
2165 if (port_mask & (1 << i)) {
2166 if (!st->aux.port[i].nmp.addr)
2172 if (st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND)) {
2175 nvi_pm(st, __func__, NVI_PM_ON);
2176 ret = nvi_aux_port_enable(st, port_mask, enable);
2177 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_enable=", ret);
2179 nvi_mutex_unlock(st);
2182 EXPORT_SYMBOL(nvi_mpu_enable);
2184 int nvi_mpu_delay_ms(int port, u8 delay_ms)
2186 struct nvi_state *st = nvi_state_local;
2190 if (st->sts & NVI_DBG_SPEW_AUX)
2191 pr_info("%s port %d: %u\n", __func__, port, delay_ms);
2193 pr_debug("%s port %d: %u ERR -EAGAIN\n",
2194 __func__, port, delay_ms);
2199 ret = nvi_aux_mpu_call_pre(st, port);
2201 st->aux.port[port].nmp.delay_ms = delay_ms;
2202 if (st->rc.i2c_slv_ctrl[port] & BIT_SLV_EN)
2203 ret = nvi_aux_delay(st, __func__);
2204 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_delay_ms=", ret);
2206 nvi_mutex_unlock(st);
2209 EXPORT_SYMBOL(nvi_mpu_delay_ms);
2211 int nvi_mpu_data_out(int port, u8 data_out)
2213 struct nvi_state *st = nvi_state_local;
2219 ret = nvi_aux_mpu_call_pre(st, port);
2221 if (st->rc.i2c_slv_ctrl[port] & BIT_SLV_EN) {
2222 ret = nvi_aux_port_data_out(st, port, data_out);
2224 st->aux.port[port].nmp.data_out = data_out;
2225 st->aux.port[port].hw_do = false;
2232 EXPORT_SYMBOL(nvi_mpu_data_out);
2234 int nvi_mpu_batch(int port, unsigned int period_us, unsigned int timeout_us)
2236 struct nvi_state *st = nvi_state_local;
2240 if (st->sts & NVI_DBG_SPEW_AUX)
2241 pr_info("%s port %d: p=%u t=%u\n",
2242 __func__, port, period_us, timeout_us);
2244 pr_debug("%s port %d: p=%u t=%u ERR -EAGAIN\n",
2245 __func__, port, period_us, timeout_us);
2250 ret = nvi_aux_mpu_call_pre(st, port);
2252 if (timeout_us && ((st->aux.port[port].nmp.id == ID_INVALID) ||
2253 (st->aux.port[port].nmp.id >= ID_INVALID_END))) {
2254 /* sensor not supported by DMP */
2257 st->aux.port[port].period_us = period_us;
2258 st->aux.port[port].timeout_us = timeout_us;
2259 ret = nvi_period_aux(st);
2260 if (st->en_msk & (1 << DEV_DMP) &&
2261 st->hal->dmp->fn_dev_batch) {
2262 /* batch can be done real-time with DMP on */
2264 ret = st->hal->dmp->fn_dev_batch(st, DEV_AUX,
2268 /* timings changed */
2271 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_batch=", ret);
2274 nvi_mutex_unlock(st);
2277 EXPORT_SYMBOL(nvi_mpu_batch);
2279 int nvi_mpu_flush(int port)
2281 struct nvi_state *st = nvi_state_local;
2285 if (st->sts & NVI_DBG_SPEW_AUX)
2286 pr_info("%s port %d\n", __func__, port);
2288 pr_debug("%s port %d ERR -EAGAIN\n", __func__, port);
2293 ret = nvi_aux_mpu_call_pre(st, port);
2295 if (st->hal->dev[DEV_AUX]->fifo_en_msk) {
2296 /* HW flush only when FIFO is used for AUX */
2297 st->aux.port[port].flush = true;
2298 ret = nvi_read(st, true);
2300 nvi_flush_aux(st, port);
2302 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_flush=", ret);
2304 nvi_mutex_unlock(st);
2307 EXPORT_SYMBOL(nvi_mpu_flush);
2309 int nvi_mpu_fifo(int port, unsigned int *reserve, unsigned int *max)
2311 struct nvi_state *st = nvi_state_local;
2315 if (st->sts & NVI_DBG_SPEW_AUX)
2316 pr_info("%s port %d\n", __func__, port);
2318 pr_debug("%s port %d ERR -EAGAIN\n", __func__, port);
2323 ret = nvi_aux_mpu_call_pre(st, port);
2325 if ((st->aux.port[port].nmp.id != ID_INVALID) &&
2326 (st->aux.port[port].nmp.id < ID_INVALID_END)) {
2328 /* batch not supported at this time */
2331 /* batch not supported at this time */
2333 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_fifo=", 0);
2338 nvi_mutex_unlock(st);
2341 EXPORT_SYMBOL(nvi_mpu_fifo);
2343 int nvi_mpu_bypass_request(bool enable)
2345 struct nvi_state *st = nvi_state_local;
2349 if (st->sts & NVI_DBG_SPEW_AUX)
2350 pr_info("%s enable=%x\n", __func__, enable);
2352 pr_debug("%s ERR -EAGAIN\n", __func__);
2357 if (!(st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND))) {
2358 nvi_pm(st, __func__, NVI_PM_ON);
2359 ret = nvi_aux_bypass_request(st, enable);
2360 nvi_pm(st, __func__, NVI_PM_AUTO);
2361 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_bypass_request=",
2364 nvi_mutex_unlock(st);
2367 EXPORT_SYMBOL(nvi_mpu_bypass_request);
2369 int nvi_mpu_bypass_release(void)
2371 struct nvi_state *st = nvi_state_local;
2374 if (st->sts & NVI_DBG_SPEW_AUX)
2375 pr_info("%s\n", __func__);
2377 pr_debug("%s\n", __func__);
2382 if (!(st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND))) {
2383 nvi_pm(st, __func__, NVI_PM_ON);
2384 nvi_aux_bypass_release(st);
2385 nvi_pm(st, __func__, NVI_PM_AUTO);
2386 nvi_aux_mpu_call_post(st, "nvi_mpu_bypass_release", 0);
2388 nvi_mutex_unlock(st);
2391 EXPORT_SYMBOL(nvi_mpu_bypass_release);
2394 int nvi_reset(struct nvi_state *st, const char *fn,
2395 bool rst_fifo, bool rst_i2c, bool en_irq)
2399 bool rst_dmp = false;
2403 ret = nvi_int_able(st, __func__, false);
2405 if (rst_i2c || st->aux.reset_i2c) {
2406 st->aux.reset_i2c = false;
2408 ret |= nvi_aux_enable(st, __func__, false, false);
2409 val |= BIT_I2C_MST_RST;
2412 st->aux.reset_fifo = false;
2413 val |= BIT_FIFO_RST;
2414 if (st->en_msk & (1 << DEV_DMP)) {
2417 ret |= nvi_aux_enable(st, __func__, false, false);
2420 ret |= nvi_user_ctrl_en(st, __func__,
2421 !rst_fifo, !rst_fifo, !rst_i2c, false);
2422 val |= st->rc.user_ctrl;
2423 ret |= nvi_user_ctrl_rst(st, val);
2424 if (rst_i2c || rst_dmp)
2425 ret |= nvi_aux_enable(st, __func__, true, false);
2426 ts = nvs_timestamp();
2428 for (i = 0; i < st->hal->src_n; i++) {
2429 st->src[i].ts_reset = true;
2430 st->src[i].ts_1st = ts;
2431 st->src[i].ts_end = ts;
2432 st->src[i].ts_period = st->src[i].period_us_src * 1000;
2435 for (i = 0; i < DEV_N_AUX; i++) {
2436 st->snsr[i].ts_reset = true;
2437 st->snsr[i].ts_last = ts;
2438 st->snsr[i].ts_n = 0;
2441 for (i = 0; i < AUX_PORT_MAX; i++) {
2442 st->aux.port[i].ts_reset = true;
2443 st->aux.port[i].ts_last = ts;
2448 ret |= st->hal->dmp->fn_clk_n(st, &st->dmp_clk_n);
2449 st->src[SRC_DMP].ts_reset = true;
2450 st->src[SRC_DMP].ts_1st = ts;
2451 st->src[SRC_DMP].ts_end = ts;
2452 st->src[SRC_DMP].ts_period =
2453 st->src[SRC_DMP].period_us_src * 1000;
2457 ret |= nvi_user_ctrl_en(st, __func__, true, true, true, en_irq);
2458 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG |
2459 NVI_DBG_SPEW_FIFO | NVI_DBG_SPEW_TS))
2460 dev_info(&st->i2c->dev,
2461 "%s-%s DMP=%x FIFO=%x I2C=%x ts=%lld err=%d\n",
2462 __func__, fn, rst_dmp, rst_fifo, rst_i2c, ts, ret);
2466 s64 nvi_ts_dev(struct nvi_state *st, s64 ts_now,
2467 unsigned int dev, unsigned int aux_port)
2473 if (st->en_msk & (1 << DEV_DMP))
2476 src = st->hal->dev[dev]->src;
2481 ts = nvs_timestamp();
2483 if (dev == DEV_AUX && aux_port < AUX_PORT_MAX) {
2484 if (st->aux.port[aux_port].ts_reset) {
2485 st->aux.port[aux_port].ts_reset = false;
2486 ts = st->src[src].ts_1st;
2488 ts = st->src[src].ts_period;
2489 if (st->aux.port[aux_port].odr)
2490 ts *= (st->aux.port[aux_port].odr + 1);
2491 ts += st->aux.port[aux_port].ts_last;
2494 if (st->snsr[dev].ts_reset) {
2495 st->snsr[dev].ts_reset = false;
2496 ts = st->src[src].ts_1st;
2498 ts = st->src[src].ts_period;
2499 if (st->snsr[dev].odr)
2500 ts *= (st->snsr[dev].odr + 1);
2501 ts += st->snsr[dev].ts_last;
2505 if (st->sts & (NVI_DBG_SPEW_FIFO | NVI_DBG_SPEW_TS))
2506 dev_info(&st->i2c->dev,
2507 "%s ts > ts_now (%lld > %lld)\n",
2508 __func__, ts, ts_now);
2512 if (dev == DEV_AUX && aux_port < AUX_PORT_MAX) {
2513 if (ts < st->aux.port[aux_port].ts_last)
2516 st->aux.port[aux_port].ts_last = ts;
2518 if (ts < st->snsr[dev].ts_last)
2521 st->snsr[dev].ts_last = ts;
2523 if (ts < st->snsr[dev].ts_push_delay)
2525 if (st->sts & NVI_DBG_SPEW_FIFO && src >= 0)
2526 dev_info(&st->i2c->dev,
2527 "src[%d] ts_period=%lld ts_end=%lld %s ts[%u]=%lld\n",
2528 src, st->src[src].ts_period, st->src[src].ts_end,
2529 st->snsr[dev].cfg.name, st->snsr[dev].ts_n, ts);
2530 st->snsr[dev].ts_n++;
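/* nvi_ts_dev() above extrapolates each sample's timestamp from the source
 * period (scaled by the device's ODR divider) added to the device's last
 * timestamp, clamps the result so it can never pass ts_now, and on the
 * first sample after a reset starts from the source's ts_1st.
 */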
2534 static void nvi_aux_rd(struct nvi_state *st)
2538 struct aux_port *ap;
2543 if ((!st->aux.ext_data_n) || (!(st->rc.user_ctrl & BIT_I2C_MST_EN)))
2546 ret = nvi_i2c_r(st, st->hal->reg->ext_sens_data_00.bank,
2547 st->hal->reg->ext_sens_data_00.reg,
2548 st->aux.ext_data_n, (u8 *)&st->aux.ext_data);
2552 ts = nvi_ts_dev(st, 0, DEV_AUX, -1);
2553 for (i = 0; i < AUX_PORT_IO; i++) {
2554 ap = &st->aux.port[i];
2555 if ((st->rc.i2c_slv_ctrl[i] & BIT_SLV_EN) &&
2556 (ap->nmp.addr & BIT_I2C_READ) &&
2557 (ap->nmp.handler != NULL)) {
2558 p = &st->aux.ext_data[ap->ext_data_offset];
2559 len = ap->nmp.ctrl & BITS_I2C_SLV_CTRL_LEN;
2560 ap->nmp.handler(p, len, ts, ap->nmp.ext_driver);
2565 static s32 nvi_matrix(struct nvi_state *st, signed char *matrix,
2566 s32 x, s32 y, s32 z, unsigned int axis)
2568 return ((matrix[0 + axis] == 1 ? x :
2569 (matrix[0 + axis] == -1 ? -x : 0)) +
2570 (matrix[3 + axis] == 1 ? y :
2571 (matrix[3 + axis] == -1 ? -y : 0)) +
2572 (matrix[6 + axis] == 1 ? z :
2573 (matrix[6 + axis] == -1 ? -z : 0)));
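/* Worked example of the mounting matrix as applied above: with
 * matrix[] = { 0, -1, 0, 1, 0, 0, 0, 0, 1 } (a rotation about the Z axis)
 * nvi_matrix() maps (x, y, z) to (y, -x, z):
 *	axis 0: matrix[0] = 0,  matrix[3] = 1, matrix[6] = 0  ->  y
 *	axis 1: matrix[1] = -1, matrix[4] = 0, matrix[7] = 0  -> -x
 *	axis 2: matrix[2] = 0,  matrix[5] = 0, matrix[8] = 1  ->  z
 */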
2576 int nvi_push(struct nvi_state *st, unsigned int dev, u8 *buf, s64 ts)
2583 unsigned int buf_le_i;
2590 ch_sz = abs(st->snsr[dev].cfg.ch_sz);
2592 if (st->snsr[dev].buf_n) {
2593 n = st->snsr[dev].buf_n / st->snsr[dev].cfg.ch_n;
2594 m = st->snsr[dev].buf_n % st->snsr[dev].cfg.ch_n;
2600 /* convert big endian byte stream to little endian channel data */
2601 for (ch = 0; ch < st->snsr[dev].cfg.ch_n; ch++) {
2603 if (st->snsr[dev].enable & (1 << ch)) {
2604 if (m && ch == (st->snsr[dev].cfg.ch_n - 1)) {
2605 /* handle last channel misalignment */
2606 for (i = 0; i < m; i++) {
2608 val_le[ch] |= (u8)*buf++;
2610 /* extend sign bit */
2611 i = (sizeof(val_le[ch]) - m) * 8;
2615 for (i = 0; i < n; i++) {
2617 val_le[ch] |= (u8)*buf++;
2619 /* extend sign bit */
2620 i = (sizeof(val_le[ch]) - n) * 8;
2629 /* shift HW data size to channel size if needed */
2630 if (st->snsr[dev].buf_shft) {
2631 if (st->snsr[dev].buf_shft < 0) {
2632 n = abs(st->snsr[dev].buf_shft);
2633 for (ch = 0; ch < st->snsr[dev].cfg.ch_n; ch++)
2636 for (ch = 0; ch < st->snsr[dev].cfg.ch_n; ch++)
2637 val_le[ch] <<= st->snsr[dev].buf_shft;
2641 /* apply matrix if needed */
2642 if (st->snsr[dev].matrix) {
2643 for (ch = 0; ch < AXIS_N; ch++)
2644 val[ch] = val_le[ch];
2646 for (ch = 0; ch < AXIS_N; ch++)
2647 val_le[ch] = nvi_matrix(st, st->snsr[dev].cfg.matrix,
2648 val[AXIS_X], val[AXIS_Y],
2652 /* convert little endian channel data to little endian byte stream */
2654 for (ch = 0; ch < st->snsr[dev].cfg.ch_n; ch++) {
2655 u_val = (u32)val_le[ch];
2656 for (i = 0; i < ch_sz; i++) {
2657 buf_le[buf_le_i + i] = (u8)(u_val & 0xFF);
2663 /* add status if needed (no endian conversion) */
2664 if (buf_le_i < st->snsr[dev].cfg.snsr_data_n) {
2665 n = st->snsr[dev].cfg.snsr_data_n - buf_le_i;
2666 u_val = st->snsr[dev].sts;
2667 for (i = 0; i < n; i++) {
2668 buf_le[buf_le_i + i] = (u8)(u_val & 0xFF);
2674 if (st->sts & (NVI_DBG_SPEW_SNSR << dev)) {
2676 st->sts |= NVS_STS_SPEW_DATA;
2677 st->nvs->handler(st->snsr[dev].nvs_st, buf_le, ts);
2678 if (!(sts & NVS_STS_SPEW_DATA))
2679 st->sts &= ~NVS_STS_SPEW_DATA;
2681 st->nvs->handler(st->snsr[dev].nvs_st, buf_le, ts);
2687 static int nvi_push_event(struct nvi_state *st, unsigned int dev)
2689 s64 ts = nvs_timestamp();
2694 if (st->sts & (NVI_DBG_SPEW_SNSR << dev)) {
2696 st->sts |= NVS_STS_SPEW_DATA;
2697 ret = st->nvs->handler(st->snsr[dev].nvs_st, &val, ts);
2698 if (!(sts & NVS_STS_SPEW_DATA))
2699 st->sts &= ~NVS_STS_SPEW_DATA;
2701 ret = st->nvs->handler(st->snsr[dev].nvs_st, &val, ts);
2706 static int nvi_push_oneshot(struct nvi_state *st, unsigned int dev)
2708 /* disable now to avoid reinitialization on handler's disable */
2709 st->snsr[dev].enable = 0;
2710 st->en_msk &= ~(1 << dev);
2711 return nvi_push_event(st, dev);
2714 static int nvi_dev_rd(struct nvi_state *st, unsigned int dev)
2720 if (!st->snsr[dev].enable)
2723 len = st->snsr[dev].cfg.ch_n << 1;
2724 ret = nvi_i2c_r(st, st->hal->reg->out_h[dev].bank,
2725 st->hal->reg->out_h[dev].reg, len, buf);
2727 ret = nvi_push(st, dev, buf, nvi_ts_dev(st, 0, dev, 0));
2731 static int nvi_fifo_aux(struct nvi_state *st, s64 ts, unsigned int n)
2733 struct aux_port *ap;
2734 unsigned int fifo_data_n;
2737 ts = nvi_ts_dev(st, ts, DEV_AUX, -1);
2738 for (port = 0; port < AUX_PORT_IO; port++) {
2739 ap = &st->aux.port[port];
2740 if (st->rc.fifo_en & (1 << st->hal->bit->slv_fifo_en[port])) {
2741 fifo_data_n = ap->nmp.ctrl & BITS_I2C_SLV_CTRL_LEN;
2742 if (fifo_data_n > n)
2745 ap->nmp.handler(&st->buf[st->buf_i], fifo_data_n, ts,
2746 ap->nmp.ext_driver);
2747 st->buf_i += fifo_data_n;
2750 if (st->sts & (NVS_STS_SUSPEND | NVS_STS_SHUTDOWN))
2757 static int nvi_fifo_dev_rd(struct nvi_state *st, s64 ts, unsigned int n,
2760 if (st->sts & (NVS_STS_SUSPEND | NVS_STS_SHUTDOWN))
2763 if (st->hal->dev[dev]->fifo_data_n > n)
2766 nvi_push(st, dev, &st->buf[st->buf_i], nvi_ts_dev(st, ts, dev, 0));
2767 st->buf_i += st->hal->dev[dev]->fifo_data_n;
2771 static int nvi_fifo_dev(struct nvi_state *st, s64 ts, unsigned int n)
2776 dev = st->hal->fifo_dev[(st->rc.fifo_cfg >> 2) & 0x07];
2778 ret = nvi_fifo_aux(st, ts, n);
2780 ret = nvi_fifo_dev_rd(st, ts, n, dev);
2784 static int nvi_fifo_devs(struct nvi_state *st, s64 ts, unsigned int n)
2789 for (dev = 0; dev < DEV_MPU_N; dev++) {
2790 if (st->rc.fifo_en & st->hal->dev[dev]->fifo_en_msk) {
2791 ret = nvi_fifo_dev_rd(st, ts, n, dev);
2797 if (st->rc.fifo_en & st->hal->dev[DEV_AUX]->fifo_en_msk)
2798 ret = nvi_fifo_aux(st, ts, n);
2802 /* fifo_n_max can be used if we want to round-robin FIFOs */
2803 static int nvi_fifo_rd(struct nvi_state *st, int src, unsigned int fifo_n_max,
2804 int (*fn)(struct nvi_state *st, s64 ts, unsigned int n))
2813 unsigned int fifo_n;
2817 ts_end = nvs_timestamp();
2820 ret = st->hal->dmp->fn_clk_n(st, &dmp_clk_n);
2821 ret |= nvi_i2c_rd(st, &st->hal->reg->fifo_count_h, (u8 *)&fifo_count);
2822 if (ret || !fifo_count)
2825 ts_now = nvs_timestamp();
2826 if (ts_now < (ts_end + 5000000))
2830 ts_end = atomic64_read(&st->ts_irq);
2831 fifo_n = (unsigned int)be16_to_cpu(fifo_count);
2832 if (st->sts & NVS_STS_SPEW_IRQ)
2833 dev_info(&st->i2c->dev,
2834 "src=%d sync=%x fifo_n=%u ts_clk_n=%u ts_diff=%lld\n",
2835 src, sync, fifo_n, dmp_clk_n, ts_now - st->ts_now);
2836 st->ts_now = ts_now;
2839 if (dmp_clk_n > st->dmp_clk_n)
2840 ts_n = dmp_clk_n - st->dmp_clk_n;
2842 /* counter rolled over */
2843 ts_n = (~st->dmp_clk_n + 1) + dmp_clk_n;
2844 /* ts_n is the number of DMP clock ticks since last time */
2845 st->dmp_clk_n = dmp_clk_n;
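/* Worked example of the rollover handling above (illustrative only,
 * assuming the DMP clock counter is a 32-bit unsigned int): if the
 * previous count was 0xFFFFFFF0 and the new count is 0x00000010, then
 * ts_n = (~0xFFFFFFF0 + 1) + 0x10 = 0x10 + 0x10 = 0x20, i.e. 32 DMP
 * clock ticks elapsed across the wrap.
 */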
2847 fifo_n_max = 0; /* DMP disables round-robin FIFOs */
2850 ts_n = fifo_n / st->src[src].fifo_data_n; /* TS's needed */
2851 if ((fifo_n % st->src[src].fifo_data_n) || !ts_n)
2852 /* reset the FIFO if fifo_n isn't a whole number of samples */
2857 ts_period = st->src[src].period_us_src;
2859 if (sync && ts_end > st->src[src].ts_end && ts_end < ts_now &&
2860 ts_end > (ts_now - (ts_period >> 2)))
2861 /* ts_irq is within the rate so sync to IRQ */
2863 if (st->src[src].ts_reset) {
2864 st->src[src].ts_reset = false;
2865 ts_end = st->src[src].ts_period * (ts_n - 1);
2867 st->src[src].ts_1st = ts_now - ts_end;
2868 st->src[src].ts_end = st->src[src].ts_1st;
2871 ts_end = st->src[src].ts_period * ts_n;
2873 ts_end += st->src[src].ts_end;
2874 /* ts_now will be sent to nvi_ts_dev, where the timestamp is
2875 * prevented from going into the future, which allows some
2876 * tolerance here for ts_end being a little more than ts_now.
2877 * The more tolerance we have, the less often the period is
2878 * recalculated, which avoids swinging around the true period.
2879 * Plus, the clamp on ts_now in nvi_ts_dev has the benefit of
2880 * "syncing" with the current calculations per device. */
2882 if (ts_end > (ts_now + (ts_period >> 3)) || (sync && (ts_end <
2883 (ts_now - (ts_period >> 1))))) {
2884 if (st->sts & (NVI_DBG_SPEW_FIFO | NVI_DBG_SPEW_TS)) {
2885 dev_info(&st->i2c->dev,
2886 "sync=%x now=%lld end=%lld ts_n=%u\n",
2887 sync, ts_now, ts_end, ts_n);
2888 dev_info(&st->i2c->dev,
2889 "src=%d old period=%lld end=%lld\n",
2890 src, st->src[src].ts_period,
2891 st->src[src].ts_end);
2893 /* st->src[src].ts_period needs to be adjusted */
2894 ts_period = ts_now - st->src[src].ts_end;
2895 do_div(ts_period, ts_n);
2896 st->src[src].ts_period = ts_period;
2897 ts_end = ts_period * ts_n;
2898 ts_end += st->src[src].ts_end;
2899 if (st->sts & (NVI_DBG_SPEW_FIFO | NVI_DBG_SPEW_TS))
2900 dev_info(&st->i2c->dev,
2901 "src=%d new period=%lld end=%lld\n",
2902 src, ts_period, ts_end);
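/* Worked example of the period adjustment above (numbers are
 * illustrative): if ts_n = 4 samples are pending and ts_now minus the
 * old ts_end is 41ms, then do_div yields a new period of 10.25ms and
 * the recomputed ts_end (old ts_end + 4 * 10.25ms) lands exactly on
 * ts_now, so the 4 timestamps are spread evenly instead of drifting
 * past the present.
 */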
2905 /* would only apply to FIFO timing (non-DMP) */
2906 if (fifo_n_max < fifo_n) {
2907 fifo_n = fifo_n_max;
2908 ts_n = fifo_n / st->src[src].fifo_data_n;
2909 ts_end = st->src[src].ts_period * ts_n;
2910 ts_end += st->src[src].ts_end;
2913 st->src[src].ts_end = ts_end;
2915 /* wasn't able to calculate TS */
2920 buf_n = sizeof(st->buf) - st->buf_i;
2923 ret = nvi_i2c_r(st, st->hal->reg->fifo_rw.bank,
2924 st->hal->reg->fifo_rw.reg,
2925 buf_n, &st->buf[st->buf_i]);
2932 /* fn updates st->buf_i */
2933 while (st->buf_i < buf_n) {
2934 ret = fn(st, ts_now, buf_n - st->buf_i);
2935 /* ret < 0: error to exit
2936 * ret = 0: not enough data to process
2937 * ret > 0: all done processing data
2945 memcpy(st->buf, &st->buf[st->buf_i], buf_n);
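/* Note: nvi_fifo_rd() is driven from nvi_rd() below with one of three
 * callbacks for fn: st->hal->dmp->fn_rd when the DMP owns the FIFO,
 * nvi_fifo_dev for the ICM multi-FIFO configuration, and nvi_fifo_devs
 * when all enabled devices share a single FIFO.
 */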
2957 static int nvi_rd(struct nvi_state *st)
2965 if (st->en_msk & (1 << DEV_DMP)) {
2966 if (st->en_msk & ((1 << DEV_SM) | (1 << DEV_STP))) {
2967 ret = nvi_i2c_rd(st, &st->hal->reg->int_dmp, &val);
2968 if (val & (1 << st->hal->bit->dmp_int_sm))
2969 nvi_push_oneshot(st, DEV_SM);
2970 if (val & (1 << st->hal->bit->dmp_int_stp))
2971 nvi_push_event(st, DEV_STP);
2973 if (st->en_msk & st->dmp_en_msk)
2975 return nvi_fifo_rd(st, -1, 0, st->hal->dmp->fn_rd);
2981 if (st->pm == NVI_PM_ON_CYCLE) {
2982 /* only low power accelerometer data */
2983 nvi_pm(st, __func__, NVI_PM_ON);
2984 ret = nvi_dev_rd(st, DEV_ACC);
2985 nvi_pm(st, __func__, NVI_PM_AUTO);
2989 nvi_dev_rd(st, DEV_TMP);
2990 if (!(st->rc.fifo_en & st->hal->dev[DEV_AUX]->fifo_en_msk))
2992 /* handle FIFO enabled data */
2993 if (st->rc.fifo_cfg & 0x01) {
2994 /* multi FIFO enabled */
2995 int_msk = 1 << st->hal->bit->int_data_rdy_0;
2996 for (fifo = 0; fifo < st->hal->fifo_n; fifo++) {
2997 if (st->rc.int_enable & (int_msk << fifo)) {
2998 ret = nvi_wr_fifo_cfg(st, fifo);
3002 src = st->hal->dev[st->hal->
3003 fifo_dev[fifo]]->src;
3004 ret = nvi_fifo_rd(st, src, 0, nvi_fifo_dev);
3005 if (st->buf_i || (ret < 0)) {
3006 /* HW FIFO misalignment - reset */
3013 /* st->fifo_src is either SRC_MPU or the source for the single
3014 * device enabled for the single FIFO in ICM.
3016 ret = nvi_fifo_rd(st, st->fifo_src, 0, nvi_fifo_devs);
3017 if (st->buf_i || (ret < 0)) {
3018 /* HW FIFO misalignment - reset */
3027 static int nvi_read(struct nvi_state *st, bool flush)
3031 if (st->irq_dis && !(st->sts & NVS_STS_SHUTDOWN)) {
3032 dev_err(&st->i2c->dev, "%s ERR: IRQ storm reset. n=%u\n",
3033 __func__, st->irq_storm_n);
3034 st->irq_storm_n = 0;
3035 nvi_pm(st, __func__, NVI_PM_ON);
3036 nvi_wr_pm1(st, __func__, BIT_H_RESET);
3039 } else if (!(st->sts & (NVS_STS_SUSPEND | NVS_STS_SHUTDOWN))) {
3042 nvi_en(st); /* a little harder reset for ICM DMP */
3044 nvi_reset(st, __func__, true, false, true);
3051 static irqreturn_t nvi_thread(int irq, void *dev_id)
3053 struct nvi_state *st = (struct nvi_state *)dev_id;
3056 nvi_read(st, false);
3057 nvi_mutex_unlock(st);
3061 static irqreturn_t nvi_handler(int irq, void *dev_id)
3063 struct nvi_state *st = (struct nvi_state *)dev_id;
3064 u64 ts = nvs_timestamp();
3065 u64 ts_old = atomic64_xchg(&st->ts_irq, ts);
3066 u64 ts_diff = ts - ts_old;
3068 /* test for MPU IRQ storm problem */
3069 if (ts_diff < NVI_IRQ_STORM_MIN_NS) {
3071 if (st->irq_storm_n > NVI_IRQ_STORM_MAX_N)
3072 nvi_disable_irq(st);
3074 st->irq_storm_n = 0;
3077 if (st->sts & NVS_STS_SPEW_IRQ)
3078 dev_info(&st->i2c->dev, "%s ts=%llu ts_diff=%llu irq_dis=%x\n",
3079 __func__, ts, ts_diff, st->irq_dis);
3080 return IRQ_WAKE_THREAD;
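/* Illustration of the storm test above (the real limits come from
 * NVI_IRQ_STORM_MIN_NS and NVI_IRQ_STORM_MAX_N defined elsewhere in
 * this driver; these numbers are examples only): if the minimum were
 * 1ms and the maximum count were 100, then 100 consecutive IRQs
 * arriving less than 1ms apart would call nvi_disable_irq(), after
 * which nvi_read() logs the storm and resets the part.
 */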
3083 static int nvi_enable(void *client, int snsr_id, int enable)
3085 struct nvi_state *st = (struct nvi_state *)client;
3088 /* return current enable request status */
3089 return st->snsr[snsr_id].enable;
3091 if (st->snsr[snsr_id].enable == enable)
3092 /* nothing has changed with enable request */
3095 st->snsr[snsr_id].enable = enable;
3097 /* officially flagged as off here */
3098 st->en_msk &= ~(1 << snsr_id);
3099 if (st->sts & NVS_STS_SUSPEND)
3100 /* speed up suspend/resume by not doing nvi_en for every dev */
3103 if (snsr_id == DEV_TMP)
3104 /* this is a static sensor that will be read when gyro is on */
3107 if (st->en_msk & (1 << DEV_DMP)) {
3108 /* DMP is currently on */
3109 if (!(st->en_msk & st->dmp_en_msk))
3110 /* DMP may get turned off (may stay on due to batch) so
3111 * we update timings that may have changed while DMP
3116 nvi_period_src(st, st->hal->dev[snsr_id]->src);
3122 static int nvi_batch(void *client, int snsr_id, int flags,
3123 unsigned int period, unsigned int timeout)
3125 struct nvi_state *st = (struct nvi_state *)client;
3128 if (timeout && !st->snsr[snsr_id].cfg.fifo_max_evnt_cnt)
3131 if (snsr_id == DEV_TMP)
3134 if (period == st->snsr[snsr_id].period_us &&
3135 timeout == st->snsr[snsr_id].timeout_us)
3138 st->snsr[snsr_id].period_us = period;
3139 st->snsr[snsr_id].timeout_us = timeout;
3140 if (!st->snsr[snsr_id].enable)
3143 ret = nvi_timeout(st);
3144 if (st->en_msk & (1 << DEV_DMP)) {
3145 if (st->hal->dmp->fn_dev_batch)
3146 /* batch can be done in real-time with the DMP on */
3148 ret = st->hal->dmp->fn_dev_batch(st, snsr_id, -1);
3152 ret |= nvi_period_src(st, st->hal->dev[snsr_id]->src);
3160 static int nvi_flush(void *client, int snsr_id)
3162 struct nvi_state *st = (struct nvi_state *)client;
3165 if (st->snsr[snsr_id].enable) {
3166 st->snsr[snsr_id].flush = true;
3167 ret = nvi_read(st, true);
3172 static int nvi_max_range(void *client, int snsr_id, int max_range)
3174 struct nvi_state *st = (struct nvi_state *)client;
3176 unsigned int i = max_range;
3179 if (snsr_id < 0 || snsr_id >= DEV_N)
3182 if (st->snsr[snsr_id].enable)
3183 /* can't change settings on the fly (disable device first) */
3186 if (i > st->hal->dev[snsr_id]->rr_0n)
3187 /* clamp to highest setting */
3188 i = st->hal->dev[snsr_id]->rr_0n;
3189 st->snsr[snsr_id].usr_cfg = i;
3190 st->snsr[snsr_id].cfg.resolution.ival =
3191 st->hal->dev[snsr_id]->rr[i].resolution.ival;
3192 st->snsr[snsr_id].cfg.resolution.fval =
3193 st->hal->dev[snsr_id]->rr[i].resolution.fval;
3194 st->snsr[snsr_id].cfg.max_range.ival =
3195 st->hal->dev[snsr_id]->rr[i].max_range.ival;
3196 st->snsr[snsr_id].cfg.max_range.fval =
3197 st->hal->dev[snsr_id]->rr[i].max_range.fval;
3198 st->snsr[snsr_id].cfg.offset.ival = st->hal->dev[snsr_id]->offset.ival;
3199 st->snsr[snsr_id].cfg.offset.fval = st->hal->dev[snsr_id]->offset.fval;
3200 st->snsr[snsr_id].cfg.scale.ival = st->hal->dev[snsr_id]->scale.ival;
3201 st->snsr[snsr_id].cfg.scale.fval = st->hal->dev[snsr_id]->scale.fval;
3202 /* AXIS sensors need resolution put in the scales */
3203 if (st->snsr[snsr_id].cfg.ch_n_max) {
3204 for (ch = 0; ch < st->snsr[snsr_id].cfg.ch_n_max; ch++) {
3205 st->snsr[snsr_id].cfg.scales[ch].ival =
3206 st->snsr[snsr_id].cfg.resolution.ival;
3207 st->snsr[snsr_id].cfg.scales[ch].fval =
3208 st->snsr[snsr_id].cfg.resolution.fval;
3212 if (st->en_msk & (1 << DEV_DMP))
3217 static int nvi_offset(void *client, int snsr_id, int channel, int offset)
3219 struct nvi_state *st = (struct nvi_state *)client;
3223 if (snsr_id >= DEV_AXIS_N || channel >= AXIS_N)
3226 old = st->dev_offset[snsr_id][channel];
3227 st->dev_offset[snsr_id][channel] = offset;
3228 if (st->en_msk & (1 << snsr_id)) {
3231 st->dev_offset[snsr_id][channel] = old;
3239 static int nvi_thresh_lo(void *client, int snsr_id, int thresh_lo)
3241 struct nvi_state *st = (struct nvi_state *)client;
3249 st->snsr[DEV_SM].cfg.thresh_lo = thresh_lo;
3250 if (st->en_msk & (1 << DEV_DMP))
3251 ret = st->hal->dmp->fn_dev_init(st, snsr_id);
3261 static int nvi_thresh_hi(void *client, int snsr_id, int thresh_hi)
3263 struct nvi_state *st = (struct nvi_state *)client;
3269 st->en_msk |= (1 << EN_LP);
3271 st->en_msk &= ~(1 << EN_LP);
3275 st->snsr[DEV_SM].cfg.thresh_hi = thresh_hi;
3276 if (st->en_msk & (1 << DEV_DMP))
3277 ret = st->hal->dmp->fn_dev_init(st, snsr_id);
3287 static int nvi_reset_dev(void *client, int snsr_id)
3289 struct nvi_state *st = (struct nvi_state *)client;
3292 ret = nvi_pm(st, __func__, NVI_PM_ON);
3293 ret |= nvi_wr_pm1(st, __func__, BIT_H_RESET);
3299 static int nvi_self_test(void *client, int snsr_id, char *buf)
3301 struct nvi_state *st = (struct nvi_state *)client;
3304 nvi_pm(st, __func__, NVI_PM_ON);
3305 nvi_aux_enable(st, __func__, false, false);
3306 nvi_user_ctrl_en(st, __func__, false, false, false, false);
3307 if (snsr_id == DEV_ACC)
3308 ret = st->hal->fn->st_acc(st);
3309 else if (snsr_id == DEV_GYR)
3310 ret = st->hal->fn->st_gyr(st);
3313 nvi_aux_enable(st, __func__, true, false);
3317 return snprintf(buf, PAGE_SIZE, "%d FAIL\n", ret);
3319 return snprintf(buf, PAGE_SIZE, "%d PASS\n", ret);
3322 static int nvi_regs(void *client, int snsr_id, char *buf)
3324 struct nvi_state *st = (struct nvi_state *)client;
3331 t = snprintf(buf, PAGE_SIZE, "registers: (only data != 0 shown)\n");
3332 for (j = 0; j < st->hal->reg_bank_n; j++) {
3333 t += snprintf(buf + t, PAGE_SIZE - t, "bank %u:\n", j);
3334 for (i = 0; i < st->hal->regs_n; i++) {
3335 if ((j == st->hal->reg->fifo_rw.bank) &&
3336 (i == st->hal->reg->fifo_rw.reg))
3339 ret = nvi_i2c_r(st, j, i, 1, &data);
3341 t += snprintf(buf + t, PAGE_SIZE - t,
3344 t += snprintf(buf + t, PAGE_SIZE - t,
3345 "0x%02x=0x%02x\n", i, data);
3351 static int nvi_nvs_write(void *client, int snsr_id, unsigned int nvs)
3353 struct nvi_state *st = (struct nvi_state *)client;
3355 switch (nvs & 0xFF) {
3358 case NVI_INFO_REG_WR:
3359 case NVI_INFO_MEM_RD:
3360 case NVI_INFO_MEM_WR:
3361 case NVI_INFO_DMP_FW:
3362 case NVI_INFO_DMP_EN_MSK:
3363 case NVI_INFO_FN_INIT:
3366 case NVI_INFO_DBG_SPEW:
3367 st->sts ^= NVI_DBG_SPEW_MSG;
3370 case NVI_INFO_AUX_SPEW:
3371 st->sts ^= NVI_DBG_SPEW_AUX;
3372 nvi_aux_dbg(st, "SNAPSHOT", 0);
3375 case NVI_INFO_FIFO_SPEW:
3376 st->sts ^= NVI_DBG_SPEW_FIFO;
3379 case NVI_INFO_TS_SPEW:
3380 st->sts ^= NVI_DBG_SPEW_TS;
3384 if (nvs < (NVI_INFO_SNSR_SPEW + DEV_N))
3385 st->sts ^= (NVI_DBG_SPEW_SNSR <<
3386 (nvs - NVI_INFO_SNSR_SPEW));
3395 static int nvi_nvs_read(void *client, int snsr_id, char *buf)
3397 struct nvi_state *st = (struct nvi_state *)client;
3406 st->info = NVI_INFO_VER;
3407 switch (info & 0xFF) {
3409 t = snprintf(buf, PAGE_SIZE, "NVI driver v. %u\n",
3410 NVI_DRIVER_VERSION);
3411 if (st->en_msk & (1 << FW_LOADED)) {
3412 t += snprintf(buf + t, PAGE_SIZE - t, "DMP FW v. %u\n",
3413 st->hal->dmp->fw_ver);
3414 t += snprintf(buf + t, PAGE_SIZE - t,
3416 !!(st->en_msk & (1 << DEV_DMP)));
3418 t += snprintf(buf + t, PAGE_SIZE - t, "standby_en=%x\n",
3419 !!(st->en_msk & (1 << EN_STDBY)));
3420 t += snprintf(buf + t, PAGE_SIZE - t, "bypass_timeout_ms=%u\n",
3421 st->bypass_timeout_ms);
3422 for (i = 0; i < DEV_N_AUX; i++) {
3423 if (st->snsr[i].push_delay_ns)
3424 t += snprintf(buf + t, PAGE_SIZE - t,
3425 "%s_push_delay_ns=%lld\n",
3426 st->snsr[i].cfg.name,
3427 st->snsr[i].push_delay_ns);
3430 for (i = 0; i < DEV_N_AUX; i++) {
3431 if ((st->dmp_dev_msk | MSK_DEV_MPU_AUX) & (1 << i)) {
3432 if (st->dmp_en_msk & (1 << i))
3433 t += snprintf(buf + t, PAGE_SIZE - t,
3435 st->snsr[i].cfg.name);
3437 t += snprintf(buf + t, PAGE_SIZE - t,
3439 st->snsr[i].cfg.name);
3446 t = snprintf(buf, PAGE_SIZE, "en_msk=%x\n", st->en_msk);
3447 t += snprintf(buf + t, PAGE_SIZE - t, "sts=%x\n", st->sts);
3448 t += snprintf(buf + t, PAGE_SIZE - t, "pm=%d\n", st->pm);
3449 t += snprintf(buf + t, PAGE_SIZE - t, "bm_timeout_us=%u\n",
3451 t += snprintf(buf + t, PAGE_SIZE - t, "fifo_src=%d\n",
3453 for (i = 0; i < DEV_N_AUX; i++) {
3454 t += snprintf(buf + t, PAGE_SIZE - t, "snsr[%u] %s:\n",
3455 i, st->snsr[i].cfg.name);
3456 t += snprintf(buf + t, PAGE_SIZE - t, "usr_cfg=%x\n",
3457 st->snsr[i].usr_cfg);
3458 t += snprintf(buf + t, PAGE_SIZE - t, "enable=%x\n",
3459 st->snsr[i].enable);
3460 t += snprintf(buf + t, PAGE_SIZE - t, "period_us=%u\n",
3461 st->snsr[i].period_us);
3462 t += snprintf(buf + t, PAGE_SIZE - t,
3464 st->snsr[i].timeout_us);
3465 t += snprintf(buf + t, PAGE_SIZE - t, "odr=%u\n",
3467 t += snprintf(buf + t, PAGE_SIZE - t, "ts_last=%lld\n",
3468 st->snsr[i].ts_last);
3469 t += snprintf(buf + t, PAGE_SIZE - t, "ts_reset=%x\n",
3470 st->snsr[i].ts_reset);
3471 t += snprintf(buf + t, PAGE_SIZE - t, "flush=%x\n",
3473 t += snprintf(buf + t, PAGE_SIZE - t, "matrix=%x\n",
3474 st->snsr[i].matrix);
3475 t += snprintf(buf + t, PAGE_SIZE - t, "buf_shft=%d\n",
3476 st->snsr[i].buf_shft);
3477 t += snprintf(buf + t, PAGE_SIZE - t, "buf_n=%u\n",
3483 st->hal->dmp->fn_clk_n(st, &n);
3484 t += snprintf(buf + t, PAGE_SIZE - t,
3485 "nvi_dmp_clk_n=%u\n", n);
3486 t += snprintf(buf + t, PAGE_SIZE - t,
3487 "st->dmp_clk_n=%u\n", st->dmp_clk_n);
3492 for (i = 0; i < SRC_N; i++) {
3493 if (i >= st->hal->src_n && i != SRC_DMP)
3496 t += snprintf(buf + t, PAGE_SIZE - t, "src[%u]:\n", i);
3497 t += snprintf(buf + t, PAGE_SIZE - t, "ts_reset=%x\n",
3498 st->src[i].ts_reset);
3499 t += snprintf(buf + t, PAGE_SIZE - t, "ts_end=%lld\n",
3501 t += snprintf(buf + t, PAGE_SIZE - t,
3503 st->src[i].ts_period);
3504 t += snprintf(buf + t, PAGE_SIZE - t,
3505 "period_us_src=%u\n",
3506 st->src[i].period_us_src);
3507 t += snprintf(buf + t, PAGE_SIZE - t,
3508 "period_us_req=%u\n",
3509 st->src[i].period_us_req);
3510 t += snprintf(buf + t, PAGE_SIZE - t,
3511 "period_us_min=%u\n",
3512 st->src[i].period_us_min);
3513 t += snprintf(buf + t, PAGE_SIZE - t,
3514 "period_us_max=%u\n",
3515 st->src[i].period_us_max);
3516 t += snprintf(buf + t, PAGE_SIZE - t,
3518 st->src[i].fifo_data_n);
3519 t += snprintf(buf + t, PAGE_SIZE - t, "base_t=%u\n",
3524 case NVI_INFO_DBG_SPEW:
3525 return snprintf(buf, PAGE_SIZE, "DBG spew=%x\n",
3526 !!(st->sts & NVI_DBG_SPEW_MSG));
3528 case NVI_INFO_AUX_SPEW:
3529 return snprintf(buf, PAGE_SIZE, "AUX spew=%x\n",
3530 !!(st->sts & NVI_DBG_SPEW_AUX));
3532 case NVI_INFO_FIFO_SPEW:
3533 return snprintf(buf, PAGE_SIZE, "FIFO spew=%x\n",
3534 !!(st->sts & NVI_DBG_SPEW_FIFO));
3536 case NVI_INFO_TS_SPEW:
3537 return snprintf(buf, PAGE_SIZE, "TS spew=%x\n",
3538 !!(st->sts & NVI_DBG_SPEW_TS));
3540 case NVI_INFO_REG_WR:
3542 buf_rw[0] = (u8)(info >> 16);
3543 buf_rw[1] = (u8)(info >> 8);
3544 ret = nvi_i2c_write(st, info >> 24, 2, buf_rw);
3545 return snprintf(buf, PAGE_SIZE,
3546 "REG WR: b=%02x r=%02x d=%02x ERR=%d\n",
3547 info >> 24, buf_rw[0], buf_rw[1], ret);
3549 case NVI_INFO_MEM_RD:
3550 n = (info >> 8) & 0xFF;
3553 ret = nvi_mem_rd(st, info >> 16, n, buf_rw);
3555 return snprintf(buf, PAGE_SIZE,
3556 "MEM RD: ERR=%d\n", ret);
3558 t = snprintf(buf, PAGE_SIZE, "MEM RD:\n");
3559 for (i = 0; i < n; i++) {
3561 t += snprintf(buf + t, PAGE_SIZE - t, "%04x: ",
3563 t += snprintf(buf + t, PAGE_SIZE - t, "%02x ",
3566 t += snprintf(buf + t, PAGE_SIZE - t, "\n");
3568 t += snprintf(buf + t, PAGE_SIZE - t, "\n");
3571 case NVI_INFO_MEM_WR:
3573 buf_rw[0] = (u8)(info >> 8);
3574 ret = nvi_mem_wr(st, info >> 16, 1, buf_rw, true);
3575 return snprintf(buf, PAGE_SIZE,
3576 "MEM WR: a=%04x d=%02x ERR=%d\n",
3577 info >> 16, buf_rw[0], ret);
3579 case NVI_INFO_DMP_FW:
3580 ret = nvi_dmp_fw(st);
3581 return snprintf(buf, PAGE_SIZE, "DMP FW: ERR=%d\n", ret);
3583 case NVI_INFO_DMP_EN_MSK:
3584 st->dmp_en_msk = (info >> 8) & MSK_DEV_ALL;
3585 return snprintf(buf, PAGE_SIZE, "st->dmp_en_msk=%x\n",
3588 case NVI_INFO_FN_INIT:
3589 if (st->hal->fn->init) {
3590 ret = st->hal->fn->init(st);
3591 return snprintf(buf, PAGE_SIZE,
3592 "hal->fn->init() ret=%d\n", ret);
3594 return snprintf(buf, PAGE_SIZE,
3595 "no hal->fn->init()\n");
3599 i = info - NVI_INFO_SNSR_SPEW;
3601 return snprintf(buf, PAGE_SIZE, "%s spew=%x\n",
3602 st->snsr[i].cfg.name,
3603 !!(st->sts & (NVI_DBG_SPEW_SNSR << i)));
3610 static struct nvs_fn_dev nvi_nvs_fn = {
3611 .enable = nvi_enable,
3614 .max_range = nvi_max_range,
3615 .offset = nvi_offset,
3616 .thresh_lo = nvi_thresh_lo,
3617 .thresh_hi = nvi_thresh_hi,
3618 .reset = nvi_reset_dev,
3619 .self_test = nvi_self_test,
3621 .nvs_write = nvi_nvs_write,
3622 .nvs_read = nvi_nvs_read,
3626 static int nvi_suspend(struct device *dev)
3628 struct i2c_client *client = to_i2c_client(dev);
3629 struct nvi_state *st = i2c_get_clientdata(client);
3633 s64 ts = 0; /* initialized to avoid a compiler warning */
3635 if (st->sts & NVS_STS_SPEW_MSG)
3636 ts = nvs_timestamp();
3637 st->sts |= NVS_STS_SUSPEND;
3639 for (i = 0; i < DEV_N; i++)
3640 ret_t |= st->nvs->suspend(st->snsr[i].nvs_st);
3644 ret_t |= nvi_en(st);
3645 for (i = 0; i < DEV_N; i++) {
3646 if (st->snsr[i].enable && (st->snsr[i].cfg.flags &
3647 SENSOR_FLAG_WAKE_UP)) {
3648 ret = irq_set_irq_wake(st->i2c->irq, 1);
3650 st->irq_set_irq_wake = true;
3655 if (st->sts & NVS_STS_SPEW_MSG)
3656 dev_info(&client->dev,
3657 "%s WAKE_ON=%x elapsed_t=%lldns err=%d\n", __func__,
3658 st->irq_set_irq_wake, nvs_timestamp() - ts, ret_t);
3659 nvi_mutex_unlock(st);
3663 static int nvi_resume(struct device *dev)
3665 struct i2c_client *client = to_i2c_client(dev);
3666 struct nvi_state *st = i2c_get_clientdata(client);
3667 s64 ts = 0; /* initialized to avoid a compiler warning */
3671 if (st->sts & NVS_STS_SPEW_MSG)
3672 ts = nvs_timestamp();
3674 if (st->irq_set_irq_wake) {
3675 /* determine if wake source */
3676 ret = nvi_rd_int_status(st);
3678 dev_err(&client->dev, "%s IRQ STS ERR=%d\n",
3681 if (st->sts & NVS_STS_SPEW_MSG)
3682 dev_info(&client->dev,
3683 "%s IRQ STS=%#x DMP=%#x\n", __func__,
3684 st->rc.int_status, st->rc.int_dmp);
3685 if (st->rc.int_status & (1 << st->hal->bit->int_dmp)) {
3686 if (st->rc.int_dmp &
3687 (1 << st->hal->bit->dmp_int_sm))
3688 nvi_push_oneshot(st, DEV_SM);
3691 ret = irq_set_irq_wake(st->i2c->irq, 0);
3693 st->irq_set_irq_wake = false;
3695 nvi_mutex_unlock(st);
3698 for (i = 0; i < DEV_N; i++)
3699 ret |= st->nvs->resume(st->snsr[i].nvs_st);
3703 for (i = 0; i < AUX_PORT_MAX; i++) {
3704 if (st->aux.port[i].nmp.shutdown_bypass)
3707 if (i < AUX_PORT_MAX) {
3708 nvi_pm(st, __func__, NVI_PM_ON);
3709 nvi_aux_bypass_enable(st, false);
3711 st->sts &= ~NVS_STS_SUSPEND;
3714 if (st->sts & NVS_STS_SPEW_MSG)
3715 dev_info(&client->dev, "%s elapsed_t=%lldns err=%d\n",
3716 __func__, nvs_timestamp() - ts, ret);
3717 nvi_mutex_unlock(st);
3721 static const struct dev_pm_ops nvi_pm_ops = {
3722 .suspend = nvi_suspend,
3723 .resume = nvi_resume,
3726 static void nvi_shutdown(struct i2c_client *client)
3728 struct nvi_state *st = i2c_get_clientdata(client);
3731 st->sts |= NVS_STS_SHUTDOWN;
3733 for (i = 0; i < DEV_N; i++)
3734 st->nvs->shutdown(st->snsr[i].nvs_st);
3736 nvi_disable_irq(st);
3738 nvi_user_ctrl_en(st, __func__, false, false, false, false);
3739 nvi_pm(st, __func__, NVI_PM_OFF);
3741 if (st->sts & NVS_STS_SPEW_MSG)
3742 dev_info(&client->dev, "%s\n", __func__);
3745 static int nvi_remove(struct i2c_client *client)
3747 struct nvi_state *st = i2c_get_clientdata(client);
3751 nvi_shutdown(client);
3753 for (i = 0; i < DEV_N; i++)
3754 st->nvs->remove(st->snsr[i].nvs_st);
3758 dev_info(&client->dev, "%s\n", __func__);
3762 static struct nvi_id_hal nvi_id_hals[] = {
3763 { NVI_HW_ID_AUTO, NVI_NAME, &nvi_hal_6050 },
3764 { NVI_HW_ID_MPU6050, NVI_NAME_MPU6050, &nvi_hal_6050 },
3765 { NVI_HW_ID_MPU6500, NVI_NAME_MPU6500, &nvi_hal_6500 },
3766 { NVI_HW_ID_MPU6515, NVI_NAME_MPU6515, &nvi_hal_6515 },
3767 { NVI_HW_ID_MPU9150, NVI_NAME_MPU9150, &nvi_hal_6050 },
3768 { NVI_HW_ID_MPU9250, NVI_NAME_MPU9250, &nvi_hal_6500 },
3769 { NVI_HW_ID_MPU9350, NVI_NAME_MPU9350, &nvi_hal_6515 },
3770 { NVI_HW_ID_ICM20628, NVI_NAME_ICM20628, &nvi_hal_20628 },
3771 { NVI_HW_ID_ICM20630, NVI_NAME_ICM20630, &nvi_hal_20628 },
3772 { NVI_HW_ID_ICM20632, NVI_NAME_ICM20632, &nvi_hal_20628 },
3775 static int nvi_id2hal(struct nvi_state *st, u8 hw_id)
3779 for (i = 1; i < (int)ARRAY_SIZE(nvi_id_hals); i++) {
3780 if (nvi_id_hals[i].hw_id == hw_id) {
3781 st->hal = nvi_id_hals[i].hal;
3789 static int nvi_id_dev(struct nvi_state *st,
3790 const struct i2c_device_id *i2c_dev_id)
3792 u8 hw_id = NVI_HW_ID_AUTO;
3793 unsigned int i = i2c_dev_id->driver_data;
3798 BUG_ON(NVI_NDX_N != ARRAY_SIZE(nvi_i2c_device_id) - 1);
3799 BUG_ON(NVI_NDX_N != ARRAY_SIZE(nvi_id_hals));
3800 st->hal = nvi_id_hals[i].hal;
3801 if (i == NVI_NDX_AUTO) {
3802 nvi_pm_wr(st, __func__, 0, 0, 0);
3803 ret = nvi_i2c_rd(st, &st->hal->reg->who_am_i, &hw_id);
3805 dev_err(&st->i2c->dev, "%s AUTO ID FAILED\n",
3810 ret = nvi_id2hal(st, hw_id);
3812 st->hal = &nvi_hal_20628;
3813 /* cause a master reset by disabling regulators */
3814 nvs_vregs_disable(&st->i2c->dev, st->vreg,
3815 ARRAY_SIZE(nvi_vregs));
3816 ret = nvi_pm_wr(st, __func__, 0, 0, 0);
3817 ret = nvi_i2c_rd(st, &st->hal->reg->who_am_i, &hw_id);
3819 dev_err(&st->i2c->dev, "%s AUTO ID FAILED\n",
3824 ret = nvi_id2hal(st, hw_id);
3826 dev_err(&st->i2c->dev,
3827 "%s hw_id=%x AUTO ID FAILED\n",
3835 /* cause a master reset by disabling regulators */
3836 nvs_vregs_disable(&st->i2c->dev, st->vreg,
3837 ARRAY_SIZE(nvi_vregs));
3838 nvi_pm_wr(st, __func__, 0, 0, 0);
3841 /* populate the rest of st->snsr[dev].cfg */
3842 for (dev = 0; dev < DEV_N; dev++) {
3843 st->snsr[dev].cfg.part = nvi_id_hals[i].name;
3844 st->snsr[dev].cfg.version = st->hal->dev[dev]->version;
3845 st->snsr[dev].cfg.milliamp.ival =
3846 st->hal->dev[dev]->milliamp.ival;
3847 st->snsr[dev].cfg.milliamp.fval =
3848 st->hal->dev[dev]->milliamp.fval;
3851 #define SRM (SENSOR_FLAG_SPECIAL_REPORTING_MODE)
3852 #define OSM (SENSOR_FLAG_ONE_SHOT_MODE)
3853 BUG_ON(SRC_N < st->hal->src_n);
3854 for (dev = 0; dev < DEV_N; dev++) {
3855 src = st->hal->dev[dev]->src;
3859 BUG_ON(src >= st->hal->src_n);
3860 if ((st->snsr[dev].cfg.flags & SRM) != OSM) {
3861 st->snsr[dev].cfg.delay_us_min =
3862 st->hal->src[src].period_us_min;
3863 st->snsr[dev].cfg.delay_us_max =
3864 st->hal->src[src].period_us_max;
3868 ret = nvs_vregs_sts(st->vreg, ARRAY_SIZE(nvi_vregs));
3870 /* regulators aren't supported, so manually do a master reset */
3871 nvi_wr_pm1(st, __func__, BIT_H_RESET);
3872 for (i = 0; i < AXIS_N; i++) {
3873 st->rom_offset[DEV_ACC][i] = (s16)st->rc.accel_offset[i];
3874 st->rom_offset[DEV_GYR][i] = (s16)st->rc.gyro_offset[i];
3875 st->dev_offset[DEV_ACC][i] = 0;
3876 st->dev_offset[DEV_GYR][i] = 0;
3878 if (st->hal->fn->init)
3879 ret = st->hal->fn->init(st);
3882 if (hw_id == NVI_HW_ID_AUTO)
3883 dev_info(&st->i2c->dev, "%s: USING DEVICE TREE: %s\n",
3884 __func__, i2c_dev_id->name);
3886 dev_info(&st->i2c->dev, "%s: FOUND HW ID=%x USING: %s\n",
3887 __func__, hw_id, st->snsr[0].cfg.part);
3891 static struct sensor_cfg nvi_cfg_dflt[] = {
3893 .name = "accelerometer",
3899 .vendor = NVI_VENDOR,
3900 .float_significance = NVS_FLOAT_NANO,
3902 .thresh_hi = -1, /* LP */
3905 .name = "gyroscope",
3911 .vendor = NVI_VENDOR,
3915 .float_significance = NVS_FLOAT_NANO,
3919 .name = "gyro_temp",
3920 .snsr_id = SENSOR_TYPE_TEMPERATURE,
3923 .vendor = NVI_VENDOR,
3924 .flags = SENSOR_FLAG_ON_CHANGE_MODE,
3925 .float_significance = NVS_FLOAT_NANO,
3928 .name = "significant_motion",
3932 .vendor = NVI_VENDOR,
3934 /* delay_us_max is ignored by NVS since this is a one-shot
3935 * sensor, so we use it as a third threshold parameter. */
3937 .delay_us_max = 200, /* SMD_DELAY2_THLD */
3938 .flags = SENSOR_FLAG_ONE_SHOT_MODE |
3939 SENSOR_FLAG_WAKE_UP,
3940 .thresh_lo = 1500, /* SMD_MOT_THLD */
3941 .thresh_hi = 600, /* SMD_DELAY_THLD */
3944 .name = "step_detector",
3948 .vendor = NVI_VENDOR,
3950 .flags = SENSOR_FLAG_ONE_SHOT_MODE,
3953 .name = "quaternion",
3954 .snsr_id = SENSOR_TYPE_ORIENTATION,
3958 .vendor = NVI_VENDOR,
3959 .delay_us_min = 10000,
3960 .delay_us_max = 255000,
3963 .name = "geomagnetic_rotation_vector",
3968 .vendor = NVI_VENDOR,
3969 .delay_us_min = 10000,
3970 .delay_us_max = 255000,
3973 .name = "gyroscope_uncalibrated",
3978 .vendor = NVI_VENDOR,
3982 .delay_us_min = 10000,
3983 .delay_us_max = 255000,
3984 .float_significance = NVS_FLOAT_NANO,
3989 /* device tree parameters before HAL initialized */
3990 static int nvi_of_dt_pre(struct nvi_state *st, struct device_node *dn)
3996 for (i = 0; i < ARRAY_SIZE(nvi_cfg_dflt); i++)
3997 memcpy(&st->snsr[i].cfg, &nvi_cfg_dflt[i],
3998 sizeof(st->snsr[i].cfg));
3999 st->snsr[DEV_AUX].cfg.name = "auxiliary";
4000 st->en_msk = (1 << EN_STDBY);
4001 st->bypass_timeout_ms = NVI_BYPASS_TIMEOUT_MS;
4005 /* driver specific parameters */
4006 if (!of_property_read_u32(dn, "standby_en", &tmp)) {
4008 st->en_msk |= (1 << EN_STDBY);
4010 st->en_msk &= ~(1 << EN_STDBY);
4012 of_property_read_u32(dn, "bypass_timeout_ms", &st->bypass_timeout_ms);
4013 for (i = 0; i < DEV_N_AUX; i++) {
4014 snprintf(str, sizeof(str), "%s_push_delay_ns",
4015 st->snsr[i].cfg.name);
4016 if (!of_property_read_u32(dn, str, &tmp))
4017 st->snsr[i].push_delay_ns = (s64)tmp;
4023 /* device tree parameters after HAL initialized */
4024 static void nvi_of_dt_post(struct nvi_state *st, struct device_node *dn)
4032 /* sensor specific parameters */
4033 for (i = 0; i < DEV_N; i++)
4034 nvs_of_dt(dn, &st->snsr[i].cfg, NULL);
4036 for (i = 0; i < DEV_N; i++) {
4038 for (j = 0; j < 9; j++)
4039 tmp |= st->snsr[i].cfg.matrix[j];
4041 /* sensor has a matrix */
4042 snprintf(str, sizeof(str), "%s_matrix_enable",
4043 st->snsr[i].cfg.name);
4044 if (!of_property_read_u32(dn, str, &tmp)) {
4045 /* matrix override */
4047 /* apply matrix within kernel */
4048 st->snsr[i].matrix = true;
4050 /* HAL/fusion will handle matrix */
4051 st->snsr[i].matrix = false;
4056 /* sensor overrides that enable the DMP.
4057 * If a sensor is specific to the DMP and its override is
4058 * disabled, then that virtual sensor is removed. */
4061 st->dmp_dev_msk = st->hal->dmp->dev_msk;
4062 st->dmp_en_msk = st->hal->dmp->en_msk;
4063 for (i = 0; i < DEV_N_AUX; i++) {
4064 snprintf(str, sizeof(str), "%s_dmp_en",
4065 st->snsr[i].cfg.name);
4066 if (!of_property_read_u32(dn, str, &tmp)) {
4069 if (MSK_DEV_DMP & msk)
4070 st->dmp_dev_msk |= msk;
4071 st->dmp_en_msk |= msk;
4074 if (MSK_DEV_DMP & (1 << i))
4075 st->dmp_dev_msk &= msk;
4076 st->dmp_en_msk &= msk;
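/* Illustrative device tree fragment for the properties parsed here and
 * in nvi_of_dt_pre() (the node name, I2C address and values are
 * examples only; the property names and compatible string come from
 * this driver):
 *
 *	mpu6xxx@68 {
 *		compatible = "invensense,mpu6xxx";
 *		reg = <0x68>;
 *		standby_en = <1>;
 *		bypass_timeout_ms = <1000>;
 *		accelerometer_push_delay_ns = <0>;
 *		accelerometer_matrix_enable = <1>;
 *		quaternion_dmp_en = <1>;
 *	};
 */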
4083 static int nvi_init(struct nvi_state *st,
4084 const struct i2c_device_id *i2c_dev_id)
4086 struct mpu_platform_data *pdata;
4087 signed char matrix[9];
4092 nvi_of_dt_pre(st, st->i2c->dev.of_node);
4094 ret = nvi_id_dev(st, i2c_dev_id);
4098 if (st->i2c->dev.of_node) {
4099 nvi_of_dt_post(st, st->i2c->dev.of_node);
4101 pdata = dev_get_platdata(&st->i2c->dev);
4103 memcpy(&st->snsr[DEV_ACC].cfg.matrix,
4104 &pdata->orientation,
4105 sizeof(st->snsr[DEV_ACC].cfg.matrix));
4106 memcpy(&st->snsr[DEV_GYR].cfg.matrix,
4107 &pdata->orientation,
4108 sizeof(st->snsr[DEV_GYR].cfg.matrix));
4110 dev_err(&st->i2c->dev, "%s dev_get_platdata ERR\n",
4116 if (st->en_msk & (1 << FW_LOADED))
4119 ret = nvi_dmp_fw(st);
4121 /* remove DMP dependent sensors */
4124 dev_info(&st->i2c->dev, "%s DMP FW loaded\n", __func__);
4125 /* remove DMP dependent sensors not supported by this DMP */
4126 n = MSK_DEV_DMP ^ st->dmp_dev_msk;
4129 for (i = 0; i < DEV_N; i++) {
4131 st->snsr[i].cfg.snsr_id = -1;
4135 nvi_nvs_fn.sts = &st->sts;
4136 nvi_nvs_fn.errs = &st->errs;
4137 st->nvs = nvs_iio();
4138 if (st->nvs == NULL)
4142 for (i = 0; i < DEV_N; i++) {
4143 if (st->snsr[i].matrix) {
4144 /* matrix handled at kernel so remove from NVS */
4145 memcpy(matrix, st->snsr[i].cfg.matrix, sizeof(matrix));
4146 memset(st->snsr[i].cfg.matrix, 0,
4147 sizeof(st->snsr[i].cfg.matrix));
4149 ret = st->nvs->probe(&st->snsr[i].nvs_st, st, &st->i2c->dev,
4150 &nvi_nvs_fn, &st->snsr[i].cfg);
4152 st->snsr[i].cfg.snsr_id = i;
4153 if (st->snsr[i].matrix)
4154 memcpy(st->snsr[i].cfg.matrix, matrix,
4155 sizeof(st->snsr[i].cfg.matrix));
4156 nvi_max_range(st, i, st->snsr[i].cfg.max_range.ival);
4163 ret = request_threaded_irq(st->i2c->irq, nvi_handler, nvi_thread,
4164 IRQF_TRIGGER_RISING, NVI_NAME, st);
4166 dev_err(&st->i2c->dev, "%s req_threaded_irq ERR %d\n",
4171 nvi_pm(st, __func__, NVI_PM_AUTO);
4172 nvi_rc_clr(st, __func__);
4173 st->rc_dis = false; /* enable register cache after initialization */
4174 nvi_state_local = st;
4178 static void nvi_dmp_fw_load_worker(struct work_struct *work)
4180 struct nvi_pdata *pd = container_of(work, struct nvi_pdata,
4182 struct nvi_state *st = &pd->st;
4185 ret = nvi_init(st, pd->i2c_dev_id);
4187 dev_err(&st->i2c->dev, "%s ERR %d\n", __func__, ret);
4188 nvi_remove(st->i2c);
4190 dev_info(&st->i2c->dev, "%s done\n", __func__);
4193 static int nvi_probe(struct i2c_client *client,
4194 const struct i2c_device_id *i2c_dev_id)
4196 struct nvi_pdata *pd;
4197 struct nvi_state *st;
4200 dev_info(&client->dev, "%s %s\n", __func__, i2c_dev_id->name);
4202 dev_err(&client->dev, "%s ERR: no interrupt\n", __func__);
4206 /* just test for a device tree global disable */
4207 ret = nvs_of_dt(client->dev.of_node, NULL, NULL);
4208 if (ret == -ENODEV) {
4209 dev_info(&client->dev, "%s DT disabled\n", __func__);
4213 pd = devm_kzalloc(&client->dev, sizeof(*pd), GFP_KERNEL);
4218 i2c_set_clientdata(client, pd);
4219 st->rc_dis = true; /* disable register cache during initialization */
4221 pd->i2c_dev_id = i2c_dev_id;
4222 /* Init fw load worker thread */
4223 INIT_WORK(&pd->fw_load_work, nvi_dmp_fw_load_worker);
4224 schedule_work(&pd->fw_load_work);
4228 MODULE_DEVICE_TABLE(i2c, nvi_i2c_device_id);
4230 static const struct of_device_id nvi_of_match[] = {
4231 { .compatible = "invensense,mpu6xxx", },
4232 { .compatible = "invensense,mpu6050", },
4233 { .compatible = "invensense,mpu6500", },
4234 { .compatible = "invensense,mpu6515", },
4235 { .compatible = "invensense,mpu9150", },
4236 { .compatible = "invensense,mpu9250", },
4237 { .compatible = "invensense,mpu9350", },
4238 { .compatible = "invensense,icm20628", },
4239 { .compatible = "invensense,icm20630", },
4240 { .compatible = "invensense,icm20632", },
4244 MODULE_DEVICE_TABLE(of, nvi_of_match);
4246 static struct i2c_driver nvi_i2c_driver = {
4247 .class = I2C_CLASS_HWMON,
4249 .remove = nvi_remove,
4250 .shutdown = nvi_shutdown,
4253 .owner = THIS_MODULE,
4254 .of_match_table = of_match_ptr(nvi_of_match),
4257 .id_table = nvi_i2c_device_id,
4260 module_i2c_driver(nvi_i2c_driver);
4262 MODULE_LICENSE("GPL");
4263 MODULE_DESCRIPTION("NVidia Invensense driver");
4264 MODULE_AUTHOR("NVIDIA Corporation");