1 /* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
3 * This software is licensed under the terms of the GNU General Public
4 * License version 2, as published by the Free Software Foundation, and
5 * may be copied, distributed, and modified under those terms.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
13 /* NVS = NVidia Sensor framework */
14 /* See nvs_iio.c and nvs.h for documentation */
17 #include <linux/i2c.h>
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/kernel.h>
21 #include <linux/err.h>
22 #include <linux/delay.h>
23 #include <linux/interrupt.h>
24 #include <linux/regulator/consumer.h>
26 #include <linux/nvs.h>
27 #include <linux/crc32.h>
28 #include <linux/mpu_iio.h>
32 #define NVI_DRIVER_VERSION (329)
33 #define NVI_VENDOR "Invensense"
34 #define NVI_NAME "mpu6xxx"
35 #define NVI_NAME_MPU6050 "mpu6050"
36 #define NVI_NAME_MPU6500 "mpu6500"
37 #define NVI_NAME_MPU6515 "mpu6515"
38 #define NVI_NAME_MPU9150 "mpu9150"
39 #define NVI_NAME_MPU9250 "mpu9250"
40 #define NVI_NAME_MPU9350 "mpu9350"
41 #define NVI_NAME_ICM20628 "icm20628"
42 #define NVI_NAME_ICM20630 "icm20630"
43 #define NVI_NAME_ICM20632 "icm20632"
44 #define NVI_HW_ID_AUTO (0xFF)
45 #define NVI_HW_ID_MPU6050 (0x68)
46 #define NVI_HW_ID_MPU6500 (0x70)
47 #define NVI_HW_ID_MPU6515 (0x74)
48 #define NVI_HW_ID_MPU9150 (0x68)
49 #define NVI_HW_ID_MPU9250 (0x71)
50 #define NVI_HW_ID_MPU9350 (0x72)
51 #define NVI_HW_ID_ICM20628 (0xA2)
52 #define NVI_HW_ID_ICM20630 (0xAB)
53 #define NVI_HW_ID_ICM20632 (0xAD)
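/* The NVI_HW_ID_* values are the chip IDs read back from the part
 * (the WHO_AM_I register on the MPU devices).  NVI_HW_ID_AUTO (0xFF) is a
 * wildcard used with the generic NVI_NAME entry so the probe code can
 * identify the actual part at runtime.
 */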
54 /* NVI_FW_CRC_CHECK used only during development to confirm valid FW */
55 #define NVI_FW_CRC_CHECK (0)
59 struct work_struct fw_load_work;
60 const struct i2c_device_id *i2c_dev_id;
66 const struct nvi_hal *hal;
68 /* ARRAY_SIZE(nvi_id_hals) must match ARRAY_SIZE(nvi_i2c_device_id) - 1 */
82 /* enum NVI_NDX_N must match ARRAY_SIZE(nvi_i2c_device_id) - 1 */
83 static struct i2c_device_id nvi_i2c_device_id[] = {
84 { NVI_NAME, NVI_NDX_AUTO },
85 { NVI_NAME_MPU6050, NVI_NDX_MPU6050 },
86 { NVI_NAME_MPU6500, NVI_NDX_MPU6500 },
87 { NVI_NAME_MPU6515, NVI_NDX_MPU6515 },
88 { NVI_NAME_MPU9150, NVI_NDX_MPU9150 },
89 { NVI_NAME_MPU9250, NVI_NDX_MPU9250 },
90 { NVI_NAME_MPU9350, NVI_NDX_MPU9350 },
91 { NVI_NAME_ICM20628, NVI_NDX_ICM20628 },
92 { NVI_NAME_ICM20630, NVI_NDX_ICM20630 },
93 { NVI_NAME_ICM20632, NVI_NDX_ICM20632 },
105 NVI_INFO_REG_WR = 0xC6, /* use 0xD0 on cmd line */
112 /* regulator names in order of powering on */
113 static char *nvi_vregs[] = {
118 static struct nvi_state *nvi_state_local;
121 static int nvi_dmp_fw(struct nvi_state *st);
122 static int nvi_aux_bypass_enable(struct nvi_state *st, bool enable);
123 static int nvi_read(struct nvi_state *st, bool flush);
125 static int nvi_nb_vreg(struct nvi_state *st,
126 unsigned long event, unsigned int i)
128 if (event & REGULATOR_EVENT_POST_ENABLE)
129 st->ts_vreg_en[i] = nvs_timestamp();
130 else if (event & (REGULATOR_EVENT_DISABLE |
131 REGULATOR_EVENT_FORCE_DISABLE))
132 st->ts_vreg_en[i] = 0;
133 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
134 dev_info(&st->i2c->dev, "%s %s event=0x%x ts=%lld\n",
135 __func__, st->vreg[i].supply, (unsigned int)event,
140 static int nvi_nb_vreg_vdd(struct notifier_block *nb,
141 unsigned long event, void *ignored)
143 struct nvi_state *st = container_of(nb, struct nvi_state, nb_vreg[0]);
145 return nvi_nb_vreg(st, event, 0);
148 static int nvi_nb_vreg_vlogic(struct notifier_block *nb,
149 unsigned long event, void *ignored)
151 struct nvi_state *st = container_of(nb, struct nvi_state, nb_vreg[1]);
153 return nvi_nb_vreg(st, event, 1);
156 static int (* const nvi_nb_vreg_pf[])(struct notifier_block *nb,
157 unsigned long event, void *ignored) = {
162 void nvi_err(struct nvi_state *st)
169 static void nvi_mutex_lock(struct nvi_state *st)
174 for (i = 0; i < DEV_N; i++)
175 st->nvs->nvs_mutex_lock(st->snsr[i].nvs_st);
179 static void nvi_mutex_unlock(struct nvi_state *st)
184 for (i = 0; i < DEV_N; i++)
185 st->nvs->nvs_mutex_unlock(st->snsr[i].nvs_st);
189 static void nvi_disable_irq(struct nvi_state *st)
191 if (st->i2c->irq && !st->irq_dis) {
192 disable_irq_nosync(st->i2c->irq);
194 if (st->sts & NVS_STS_SPEW_MSG)
195 dev_info(&st->i2c->dev, "%s IRQ disabled\n", __func__);
199 static void nvi_enable_irq(struct nvi_state *st)
201 if (st->i2c->irq && st->irq_dis) {
202 enable_irq(st->i2c->irq);
204 if (st->sts & NVS_STS_SPEW_MSG)
205 dev_info(&st->i2c->dev, "%s IRQ enabled\n", __func__);
209 static int nvi_i2c_w(struct nvi_state *st, u16 len, u8 *buf)
213 msg.addr = st->i2c->addr;
217 if (i2c_transfer(st->i2c->adapter, &msg, 1) != 1) {
225 static int nvi_wr_reg_bank_sel(struct nvi_state *st, u8 reg_bank)
230 if (!st->hal->reg->reg_bank.reg)
234 if (reg_bank != st->rc.reg_bank) {
235 buf[0] = st->hal->reg->reg_bank.reg;
237 ret = nvi_i2c_w(st, sizeof(buf), buf);
239 dev_err(&st->i2c->dev, "%s 0x%x!->0x%x ERR=%d\n",
240 __func__, st->rc.reg_bank, reg_bank, ret);
242 if (st->sts & NVI_DBG_SPEW_MSG)
243 dev_info(&st->i2c->dev, "%s 0x%x->0x%x\n",
244 __func__, st->rc.reg_bank, reg_bank);
245 st->rc.reg_bank = reg_bank;
251 static int nvi_i2c_write(struct nvi_state *st, u8 bank, u16 len, u8 *buf)
255 ret = nvi_wr_reg_bank_sel(st, bank);
257 ret = nvi_i2c_w(st, len, buf);
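/* Multi-byte register write helpers: nvi_i2c_w() sends buf as-is, so the
 * elided setup presumably places the target register (br->reg) in buf[0]
 * while the loops below serialize the value into buf[1..len], most
 * significant byte first for the _be variant and least significant byte
 * first for the _le variant.
 */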
261 static int nvi_i2c_write_be(struct nvi_state *st, const struct nvi_br *br,
268 for (i = len; i > 0; i--)
269 buf[i] = (u8)(val >> (8 * (len - i)));
270 return nvi_i2c_write(st, br->bank, len + 1, buf);
273 static int nvi_i2c_write_le(struct nvi_state *st, const struct nvi_br *br,
280 for (i = 0; i < len; i++)
281 buf[i + 1] = (u8)(val >> (8 * i));
282 return nvi_i2c_write(st, br->bank, len + 1, buf);
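/* Register-cache variant: the value is compared byte-by-byte against the
 * cached copy (rc) and only written to the hardware when it differs or
 * when caching is disabled via st->rc_dis; on a successful write the
 * cache is updated to the new value.
 */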
285 int nvi_i2c_write_rc(struct nvi_state *st, const struct nvi_br *br, u32 val,
286 const char *fn, u8 *rc, bool be)
298 for (i = 0; i < len; i++) {
299 if (*(rc + i) != (u8)(val >> (8 * i))) {
307 if (wr || st->rc_dis) {
309 ret = nvi_i2c_write_be(st, br, len, val);
311 ret = nvi_i2c_write_le(st, br, len, val);
315 dev_err(&st->i2c->dev,
316 "%s 0x%08x!=>0x%01x%02x ERR=%d\n",
317 fn, val, br->bank, br->reg, ret);
319 if (st->sts & NVI_DBG_SPEW_MSG && fn)
320 dev_info(&st->i2c->dev,
321 "%s 0x%08x=>0x%01x%02x\n",
322 fn, val, br->bank, br->reg);
324 for (i = 0; i < len; i++)
325 *(rc + i) = (u8)(val >> (8 * i));
332 int nvi_i2c_wr(struct nvi_state *st, const struct nvi_br *br,
333 u8 val, const char *fn)
339 buf[1] = val | br->dflt;
340 ret = nvi_wr_reg_bank_sel(st, br->bank);
342 ret = nvi_i2c_w(st, sizeof(buf), buf);
346 dev_err(&st->i2c->dev,
347 "%s 0x%02x!=>0x%01x%02x ERR=%d\n",
348 fn, val, br->bank, br->reg, ret);
350 if (st->sts & NVI_DBG_SPEW_MSG && fn)
351 dev_info(&st->i2c->dev,
352 "%s 0x%02x=>0x%01x%02x\n",
353 fn, val, br->bank, br->reg);
359 int nvi_i2c_wr_rc(struct nvi_state *st, const struct nvi_br *br,
360 u8 val, const char *fn, u8 *rc)
365 if (val != *rc || st->rc_dis) {
366 ret = nvi_i2c_wr(st, br, val, fn);
373 int nvi_i2c_r(struct nvi_state *st, u8 bank, u8 reg, u16 len, u8 *buf)
375 struct i2c_msg msg[2];
378 ret = nvi_wr_reg_bank_sel(st, bank);
384 msg[0].addr = st->i2c->addr;
388 msg[1].addr = st->i2c->addr;
389 msg[1].flags = I2C_M_RD;
392 if (i2c_transfer(st->i2c->adapter, msg, 2) != 2) {
400 int nvi_i2c_rd(struct nvi_state *st, const struct nvi_br *br, u8 *buf)
406 return nvi_i2c_r(st, br->bank, br->reg, len, buf);
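/* DMP memory access: the 16-bit address is split into a 256-byte bank
 * (mem_bank register) and an offset within it (mem_addr register), and the
 * data is streamed through the mem_rw register.  The loop below advances
 * bank by bank when a transfer crosses a 256-byte boundary and, when the
 * final bool argument requests it, reads each chunk back (msg[3..5]) and
 * memcmp()s it against the source data.
 */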
409 int nvi_mem_wr(struct nvi_state *st, u16 addr, u16 len, u8 *data,
412 struct i2c_msg msg[6];
421 ret = nvi_wr_reg_bank_sel(st, st->hal->reg->mem_bank.bank);
425 buf_bank[0] = st->hal->reg->mem_bank.reg;
426 buf_bank[1] = addr >> 8;
427 buf_addr[0] = st->hal->reg->mem_addr.reg;
428 buf_addr[1] = addr & 0xFF;
429 buf_data[0] = st->hal->reg->mem_rw.reg;
430 msg[0].addr = st->i2c->addr;
432 msg[0].len = sizeof(buf_bank);
433 msg[0].buf = buf_bank;
434 msg[1].addr = st->i2c->addr;
436 msg[1].len = sizeof(buf_addr);
437 msg[1].buf = buf_addr;
438 msg[2].addr = st->i2c->addr;
440 msg[2].buf = buf_data;
441 msg[3].addr = st->i2c->addr;
443 msg[3].len = sizeof(buf_addr);
444 msg[3].buf = buf_addr;
445 msg[4].addr = st->i2c->addr;
448 msg[4].buf = buf_data;
449 msg[5].addr = st->i2c->addr;
450 msg[5].flags = I2C_M_RD;
451 msg[5].buf = &buf_data[1];
453 bank_len = (addr + len - 1) >> 8;
454 for (; buf_bank[1] <= bank_len; buf_bank[1]++) {
455 if (buf_bank[1] == bank_len)
456 data_len = len - data_i;
458 data_len = 0x0100 - buf_addr[1];
459 msg[2].len = data_len + 1;
460 memcpy(&buf_data[1], data + data_i, data_len);
461 if (i2c_transfer(st->i2c->adapter, msg, 3) != 3) {
467 msg[5].len = data_len;
468 if (i2c_transfer(st->i2c->adapter, &msg[3], 3) != 3) {
473 ret = memcmp(&buf_data[1], data + data_i, data_len);
485 int nvi_mem_wr_be(struct nvi_state *st, u16 addr, u16 len, u32 val)
491 for (i = 0; i < len; i++)
492 buf[i] = (u8)(val >> (8 * (len - (i + 1))));
493 ret = nvi_mem_wr(st, addr, len, buf, false);
494 if (st->sts & NVI_DBG_SPEW_MSG)
495 dev_info(&st->i2c->dev, "%s 0x%08x=>0x%04hx err=%d\n",
496 __func__, val, addr, ret);
500 int nvi_mem_wr_be_mc(struct nvi_state *st, u16 addr, u16 len, u32 val, u32 *mc)
504 if (val != *mc || st->mc_dis) {
505 ret = nvi_mem_wr_be(st, addr, len, val);
512 int nvi_mem_rd(struct nvi_state *st, u16 addr, u16 len, u8 *data)
514 struct i2c_msg msg[4];
522 ret = nvi_wr_reg_bank_sel(st, st->hal->reg->mem_bank.bank);
526 buf_bank[0] = st->hal->reg->mem_bank.reg;
527 buf_bank[1] = addr >> 8;
528 buf_addr[0] = st->hal->reg->mem_addr.reg;
529 buf_addr[1] = addr & 0xFF;
530 msg[0].addr = st->i2c->addr;
532 msg[0].len = sizeof(buf_bank);
533 msg[0].buf = buf_bank;
534 msg[1].addr = st->i2c->addr;
536 msg[1].len = sizeof(buf_addr);
537 msg[1].buf = buf_addr;
538 msg[2].addr = st->i2c->addr;
541 msg[2].buf = (u8 *)&st->hal->reg->mem_rw.reg;
542 msg[3].addr = st->i2c->addr;
543 msg[3].flags = I2C_M_RD;
545 bank_len = (addr + len - 1) >> 8;
546 for (; buf_bank[1] <= bank_len; buf_bank[1]++) {
547 if (buf_bank[1] == bank_len)
548 data_len = len - data_i;
550 data_len = 0x0100 - buf_addr[1];
551 msg[3].len = data_len;
552 msg[3].buf = data + data_i;
553 if (i2c_transfer(st->i2c->adapter, msg, 4) != 4) {
565 int nvi_mem_rd_le(struct nvi_state *st, u16 addr, u16 len, u32 *val)
572 ret = nvi_mem_rd(st, addr, len, buf_rd);
574 /* convert to little endian */
575 for (i = 0; i < len; i++) {
586 static int nvi_rd_accel_offset(struct nvi_state *st)
592 for (i = 0; i < AXIS_N; i++) {
593 ret = nvi_i2c_rd(st, &st->hal->reg->a_offset_h[i], buf);
595 st->rc.accel_offset[i] = be16_to_cpup((__be16 *)buf);
600 int nvi_wr_accel_offset(struct nvi_state *st, unsigned int axis, u16 offset)
602 return nvi_i2c_write_rc(st, &st->hal->reg->a_offset_h[axis], offset,
603 __func__, (u8 *)&st->rc.accel_offset[axis], true);
606 static int nvi_rd_gyro_offset(struct nvi_state *st)
612 for (i = 0; i < AXIS_N; i++) {
613 ret = nvi_i2c_rd(st, &st->hal->reg->g_offset_h[i], buf);
615 st->rc.gyro_offset[i] = be16_to_cpup((__be16 *)buf);
620 int nvi_wr_gyro_offset(struct nvi_state *st, unsigned int axis, u16 offset)
622 return nvi_i2c_write_rc(st, &st->hal->reg->g_offset_h[axis], offset,
623 __func__, (u8 *)&st->rc.gyro_offset[axis], true);
626 int nvi_wr_fifo_cfg(struct nvi_state *st, int fifo)
630 if (!st->hal->reg->fifo_cfg.reg)
634 fifo_cfg = (fifo << 2) | 0x01;
637 return nvi_i2c_wr_rc(st, &st->hal->reg->fifo_cfg, fifo_cfg,
638 NULL, &st->rc.fifo_cfg);
641 static int nvi_wr_i2c_slv4_ctrl(struct nvi_state *st, bool slv4_en)
645 val = st->aux.delay_hw;
646 val |= (st->aux.port[AUX_PORT_IO].nmp.ctrl & BIT_I2C_SLV_REG_DIS);
649 return nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv4_ctrl, val,
650 __func__, &st->rc.i2c_slv4_ctrl);
653 static int nvi_rd_int_sts_dmp(struct nvi_state *st)
657 ret = nvi_i2c_rd(st, &st->hal->reg->int_dmp, &st->rc.int_dmp);
659 dev_err(&st->i2c->dev, "%s %x=ERR %d\n",
660 __func__, st->hal->reg->int_dmp.reg, ret);
664 static int nvi_rd_int_status(struct nvi_state *st)
666 u8 buf[4] = {0, 0, 0, 0};
671 ret = nvi_i2c_rd(st, &st->hal->reg->int_status, buf);
673 dev_err(&st->i2c->dev, "%s %x=ERR %d\n",
674 __func__, st->hal->reg->int_status.reg, ret);
676 /* convert to little endian */
677 st->rc.int_status = 0;
678 n = st->hal->reg->int_status.len;
681 for (i = 0; i < n; i++) {
682 st->rc.int_status <<= 8;
683 st->rc.int_status |= buf[i];
686 if (st->rc.int_status & (1 << st->hal->bit->int_dmp))
687 ret = nvi_rd_int_sts_dmp(st);
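/* Interrupt enable policy: when the DMP is running only its interrupt bit
 * is unmasked; otherwise, if any device is enabled, the data-ready bits
 * are used - one bit per FIFO (int_data_rdy_0 shifted by the FIFO index)
 * when the multi-FIFO mode flagged by fifo_cfg bit 0 is active.
 */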
693 int nvi_int_able(struct nvi_state *st, const char *fn, bool en)
702 if (st->en_msk & (1 << DEV_DMP)) {
703 int_en |= 1 << st->hal->bit->int_dmp;
704 } else if (st->en_msk & MSK_DEV_ALL) {
705 int_msk = 1 << st->hal->bit->int_data_rdy_0;
706 if (st->rc.fifo_cfg & 0x01) {
707 /* multi FIFO enabled */
709 for (; fifo < st->hal->fifo_n; fifo++) {
710 dev = st->hal->fifo_dev[fifo];
714 if (st->rc.fifo_en & st->hal->
715 dev[dev]->fifo_en_msk)
716 int_en |= int_msk << fifo;
723 ret = nvi_i2c_write_rc(st, &st->hal->reg->int_enable, int_en,
724 __func__, (u8 *)&st->rc.int_enable, false);
725 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
726 dev_info(&st->i2c->dev, "%s-%s en=%x int_en=%x err=%d\n",
727 __func__, fn, en, int_en, ret);
731 static void nvi_flush_aux(struct nvi_state *st, int port)
733 struct aux_port *ap = &st->aux.port[port];
736 ap->nmp.handler(NULL, 0, 0, ap->nmp.ext_driver);
739 static void nvi_flush_push(struct nvi_state *st)
745 for (i = 0; i < DEV_N; i++) {
746 if (st->snsr[i].flush) {
747 ret = st->nvs->handler(st->snsr[i].nvs_st, NULL, 0LL);
749 st->snsr[i].flush = false;
752 for (i = 0; i < AUX_PORT_IO; i++) {
753 ap = &st->aux.port[i];
755 nvi_flush_aux(st, i);
760 static int nvi_user_ctrl_rst(struct nvi_state *st, u8 user_ctrl)
769 if (user_ctrl & BIT_SIG_COND_RST)
770 user_ctrl = BITS_USER_CTRL_RST;
771 if (user_ctrl & BIT_DMP_RST)
772 user_ctrl |= BIT_FIFO_RST;
773 if (user_ctrl & BIT_FIFO_RST) {
775 if (st->hal->reg->fifo_rst.reg) {
777 if (st->en_msk & (1 << DEV_DMP)) {
778 ret = nvi_wr_fifo_cfg(st, 0);
781 for (i = 0; i < DEV_AXIS_N; i++) {
782 if (st->hal->dev[i]->fifo_en_msk &&
787 msk = st->snsr[DEV_AUX].enable;
788 msk |= st->aux.dmp_en_msk;
789 if (st->hal->dev[DEV_AUX]->fifo_en_msk && msk)
792 ret = nvi_wr_fifo_cfg(st, 0);
794 ret = nvi_wr_fifo_cfg(st, -1);
796 if (st->en_msk & (1 << DEV_DMP))
800 ret |= nvi_i2c_wr(st, &st->hal->reg->fifo_rst,
802 ret |= nvi_i2c_wr(st, &st->hal->reg->fifo_rst,
808 if (user_ctrl == BIT_FIFO_RST)
812 user_ctrl &= ~BIT_FIFO_RST;
816 ret = nvi_i2c_wr(st, &st->hal->reg->user_ctrl, user_ctrl, __func__);
820 if (user_ctrl & BIT_FIFO_RST)
822 for (i = 0; i < POWER_UP_TIME; i++) {
824 ret = nvi_i2c_rd(st, &st->hal->reg->user_ctrl,
826 if (!(user_ctrl & BITS_USER_CTRL_RST))
832 st->rc.user_ctrl = user_ctrl;
833 if (user_ctrl & BIT_DMP_RST && st->hal->dmp) {
834 if (st->hal->dmp->dmp_reset_delay_ms)
835 msleep(st->hal->dmp->dmp_reset_delay_ms);
842 int nvi_user_ctrl_en(struct nvi_state *st, const char *fn,
843 bool en_dmp, bool en_fifo, bool en_i2c, bool en_irq)
851 if (!(st->en_msk & (1 << DEV_DMP)))
854 if (en_fifo && !en_dmp) {
855 for (i = 0; i < st->hal->src_n; i++)
856 st->src[i].fifo_data_n = 0;
858 for (i = 0; i < DEV_MPU_N; i++) {
859 if (st->snsr[i].enable &&
860 st->hal->dev[i]->fifo_en_msk) {
861 val |= st->hal->dev[i]->fifo_en_msk;
862 st->src[st->hal->dev[i]->src].fifo_data_n +=
863 st->hal->dev[i]->fifo_data_n;
864 st->fifo_src = st->hal->dev[i]->src;
868 if (st->hal->dev[DEV_AUX]->fifo_en_msk &&
869 st->snsr[DEV_AUX].enable) {
870 st->src[st->hal->dev[DEV_AUX]->src].fifo_data_n +=
872 st->fifo_src = st->hal->dev[DEV_AUX]->src;
873 for (i = 0; i < AUX_PORT_IO; i++) {
874 ap = &st->aux.port[i];
875 if (st->snsr[DEV_AUX].enable & (1 << i) &&
876 (ap->nmp.addr & BIT_I2C_READ) &&
879 st->hal->bit->slv_fifo_en[i]);
887 ret |= nvi_i2c_write_rc(st, &st->hal->reg->fifo_en, val,
888 __func__, (u8 *)&st->rc.fifo_en, false);
895 if (en_i2c && (st->en_msk & (1 << DEV_AUX)))
896 val |= BIT_I2C_MST_EN;
900 ret = nvi_int_able(st, __func__, true);
903 ret |= nvi_i2c_wr_rc(st, &st->hal->reg->user_ctrl, val,
904 __func__, &st->rc.user_ctrl);
906 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
907 dev_info(&st->i2c->dev,
908 "%s-%s DMP=%x FIFO=%x I2C=%x IRQ=%x err=%d\n",
909 __func__, fn, en_dmp, en_fifo, en_i2c, en_irq, ret);
913 int nvi_wr_pm1(struct nvi_state *st, const char *fn, u8 pm1)
919 if (pm1 & BIT_H_RESET) {
920 /* must make sure FIFO is off or IRQ storm will occur */
921 ret = nvi_int_able(st, __func__, false);
922 ret |= nvi_user_ctrl_en(st, __func__,
923 false, false, false, false);
925 nvi_user_ctrl_rst(st, BITS_USER_CTRL_RST);
926 ret = nvi_i2c_wr(st, &st->hal->reg->pm1,
927 BIT_H_RESET, __func__);
930 ret = nvi_i2c_wr_rc(st, &st->hal->reg->pm1, pm1,
931 __func__, &st->rc.pm1);
934 if (pm1 & BIT_H_RESET && !ret) {
935 st->en_msk &= MSK_RST;
936 memset(&st->rc, 0, sizeof(st->rc));
937 if (st->hal->fn->por2rc)
938 st->hal->fn->por2rc(st);
939 for (i = 0; i < st->hal->src_n; i++)
940 st->src[i].period_us_req = 0;
942 for (i = 0; i < (POWER_UP_TIME / REG_UP_TIME); i++) {
945 ret = nvi_i2c_rd(st, &st->hal->reg->pm1, &pm1_rd);
946 if ((!ret) && (!(pm1_rd & BIT_H_RESET)))
952 nvi_rd_accel_offset(st);
953 nvi_rd_gyro_offset(st);
957 if (st->sts & NVI_DBG_SPEW_MSG)
958 dev_info(&st->i2c->dev, "%s-%s pm1=%x err=%d\n",
959 __func__, fn, pm1, ret);
963 static int nvi_pm_w(struct nvi_state *st, u8 pm1, u8 pm2, u8 lp)
966 unsigned int delay_ms;
970 ret = nvs_vregs_enable(&st->i2c->dev, st->vreg, ARRAY_SIZE(nvi_vregs));
973 for (i = 0; i < ARRAY_SIZE(nvi_vregs); i++) {
974 por_ns = nvs_timestamp() - st->ts_vreg_en[i];
975 if ((por_ns < 0) || (!st->ts_vreg_en[i])) {
976 delay_ms = (POR_MS * 1000000);
980 if (por_ns < (POR_MS * 1000000)) {
981 por_ns = (POR_MS * 1000000) - por_ns;
982 if (por_ns > delay_ms)
983 delay_ms = (unsigned int)por_ns;
987 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
988 dev_info(&st->i2c->dev, "%s %ums delay\n",
992 ret = nvi_wr_pm1(st, __func__, BIT_H_RESET);
994 ret |= st->hal->fn->pm(st, pm1, pm2, lp);
998 int nvi_pm_wr(struct nvi_state *st, const char *fn, u8 pm1, u8 pm2, u8 lp)
1002 ret = nvi_pm_w(st, pm1, pm2, lp);
1003 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1004 dev_info(&st->i2c->dev, "%s-%s PM1=%x PM2=%x LPA=%x err=%d\n",
1005 __func__, fn, pm1, pm2, lp, ret);
1006 st->pm = NVI_PM_ERR; /* lost st->pm status: nvi_pm is being bypassed */
1012 * @param pm_req: call with one of the following:
1013 * NVI_PM_OFF_FORCE = force off state
1014 * NVI_PM_ON = minimum power for device access
1015 * NVI_PM_ON_FULL = power for gyro
1016 * NVI_PM_AUTO = automatically sets power after
1018 * Typical use is to set needed power for configuration and
1019 * then call with NVI_PM_AUTO when done. All other NVI_PM_
1020 * levels are handled automatically and are for internal
1022 * @return int: returns 0 for success or error code
1024 static int nvi_pm(struct nvi_state *st, const char *fn, int pm_req)
1033 lp = st->rc.lp_config;
1034 if (pm_req == NVI_PM_AUTO) {
1036 if (!(st->en_msk & MSK_PM_ACC_EN))
1037 pm2 |= BIT_PWR_ACCEL_STBY;
1038 if (!st->snsr[DEV_GYR].enable)
1039 pm2 |= BIT_PWR_GYRO_STBY;
1040 if (st->en_msk & MSK_PM_ON_FULL) {
1041 pm = NVI_PM_ON_FULL;
1042 } else if (st->en_msk & MSK_PM_ON) {
1044 } else if ((st->en_msk & ((1 << EN_LP) |
1045 MSK_DEV_ALL)) == MSK_PM_LP) {
1046 if (st->snsr[DEV_ACC].period_us >=
1047 st->snsr[DEV_ACC].cfg.thresh_hi) {
1048 for (lp = 0; lp < st->hal->lp_tbl_n; lp++) {
1049 if (st->snsr[DEV_ACC].period_us >=
1050 st->hal->lp_tbl[lp])
1053 pm = NVI_PM_ON_CYCLE;
1057 } else if (st->en_msk & MSK_PM_LP) {
1059 } else if (st->en_msk & MSK_PM_STDBY || st->aux.bypass_lock) {
1066 if ((pm_req > NVI_PM_STDBY) && (pm_req < st->pm))
1071 if (pm == NVI_PM_OFF) {
1072 for (i = 0; i < AUX_PORT_IO; i++) {
1073 if (st->aux.port[i].nmp.shutdown_bypass) {
1074 nvi_aux_bypass_enable(st, true);
1079 if (st->en_msk & (1 << FW_LOADED))
1084 case NVI_PM_OFF_FORCE:
1089 pm2 = (BIT_PWR_ACCEL_STBY | BIT_PWR_GYRO_STBY);
1092 case NVI_PM_ON_CYCLE:
1094 pm2 &= ~BIT_PWR_ACCEL_STBY;
1098 pm1 = INV_CLK_INTERNAL;
1099 if (pm2 & BIT_PWR_ACCEL_STBY) {
1100 for (i = 0; i < DEV_N_AUX; i++) {
1101 if (MSK_PM_ACC_EN & (1 << i)) {
1102 if (st->snsr[i].enable) {
1103 pm2 &= ~BIT_PWR_ACCEL_STBY;
1112 case NVI_PM_ON_FULL:
1114 /* gyro must be turned on before going to PLL clock */
1115 pm2 &= ~BIT_PWR_GYRO_STBY;
1119 dev_err(&st->i2c->dev, "%s %d=>%d ERR=EINVAL\n",
1120 __func__, st->pm, pm);
1124 if (pm != st->pm || lp != st->rc.lp_config || pm2 != (st->rc.pm2 &
1125 (BIT_PWR_ACCEL_STBY | BIT_PWR_GYRO_STBY))) {
1126 if (pm == NVI_PM_OFF) {
1127 if (st->pm > NVI_PM_OFF || st->pm == NVI_PM_ERR)
1128 ret |= nvi_wr_pm1(st, __func__, BIT_H_RESET);
1129 ret |= nvi_pm_w(st, pm1, pm2, lp);
1130 ret |= nvs_vregs_disable(&st->i2c->dev, st->vreg,
1131 ARRAY_SIZE(nvi_vregs));
1133 if (pm == NVI_PM_ON_CYCLE)
1134 /* last chance to write to regs before cycle */
1135 ret |= nvi_int_able(st, __func__, true);
1136 ret |= nvi_pm_w(st, pm1, pm2, lp);
1137 if (pm > NVI_PM_STDBY)
1138 mdelay(REG_UP_TIME);
1141 dev_err(&st->i2c->dev, "%s PM %d=>%d ERR=%d\n",
1142 __func__, st->pm, pm, ret);
1145 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1146 dev_info(&st->i2c->dev,
1147 "%s-%s PM %d=>%d PM1=%x PM2=%x LP=%x\n",
1148 __func__, fn, st->pm, pm, pm1, pm2, lp);
1156 static void nvi_pm_exit(struct nvi_state *st)
1159 nvi_pm(st, __func__, NVI_PM_OFF_FORCE);
1160 nvs_vregs_exit(&st->i2c->dev, st->vreg, ARRAY_SIZE(nvi_vregs));
1163 static int nvi_pm_init(struct nvi_state *st)
1167 ret = nvs_vregs_init(&st->i2c->dev,
1168 st->vreg, ARRAY_SIZE(nvi_vregs), nvi_vregs);
1169 st->pm = NVI_PM_ERR;
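/* DMP firmware load: optionally CRC-check the image (NVI_FW_CRC_CHECK),
 * quiesce user_ctrl, stream the image into DMP memory with write
 * verification, program the firmware start address, then run the
 * DMP-specific init hook (fn_init).  FW_LOADED is set in en_msk on
 * success.
 */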
1173 static int nvi_dmp_fw(struct nvi_state *st)
1175 #if NVI_FW_CRC_CHECK
1177 #endif /* NVI_FW_CRC_CHECK */
1183 #if NVI_FW_CRC_CHECK
1184 crc32 = crc32(0, st->hal->dmp->fw, st->hal->dmp->fw_len);
1185 if (crc32 != st->hal->dmp->fw_crc32) {
1186 dev_err(&st->i2c->dev, "%s FW CRC FAIL %x != %x\n",
1187 __func__, crc32, st->hal->dmp->fw_crc32);
1190 #endif /* NVI_FW_CRC_CHECK */
1192 ret = nvi_user_ctrl_en(st, __func__, false, false, false, false);
1196 ret = nvi_mem_wr(st, st->hal->dmp->fw_mem_addr,
1197 st->hal->dmp->fw_len,
1198 (u8 *)st->hal->dmp->fw, true);
1200 dev_err(&st->i2c->dev, "%s ERR: nvi_mem_wr\n", __func__);
1204 ret = nvi_i2c_write_rc(st, &st->hal->reg->fw_start,
1205 st->hal->dmp->fw_start,
1206 __func__, NULL, true);
1210 ret = st->hal->dmp->fn_init(st); /* nvi_dmp_init */
1212 dev_err(&st->i2c->dev, "%s ERR: nvi_dmp_init\n", __func__);
1216 nvi_user_ctrl_en(st, __func__, false, false, false, false);
1217 st->en_msk |= (1 << FW_LOADED);
1221 void nvi_push_delay(struct nvi_state *st)
1225 for (i = 0; i < DEV_MPU_N; i++) {
1226 if (st->snsr[i].enable) {
1227 if (st->snsr[i].push_delay_ns &&
1228 !st->snsr[i].ts_push_delay)
1229 st->snsr[i].ts_push_delay = nvs_timestamp() +
1230 st->snsr[i].push_delay_ns;
1232 st->snsr[i].ts_push_delay = 0;
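/* Recompute the auxiliary I2C master delay: take the largest delay_ms of
 * the enabled/DMP-owned ports, convert it to a count of AUX source periods
 * for aux.delay_hw (carried in the SLV4 control register), and set the
 * per-port delay-enable bits plus BIT_DELAY_ES_SHADOW in
 * i2c_mst_delay_ctrl.
 */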
1237 int nvi_aux_delay(struct nvi_state *st, const char *fn)
1240 unsigned int msk_en;
1241 unsigned int src_us;
/* determine the required delay based on the enabled ports */
1248 msk_en = st->snsr[DEV_AUX].enable | st->aux.dmp_en_msk;
1249 for (i = 0; msk_en; i++) {
1250 if (msk_en & (1 << i)) {
1251 msk_en &= ~(1 << i);
1252 if (delay < st->aux.port[i].nmp.delay_ms)
1253 delay = st->aux.port[i].nmp.delay_ms;
1256 src_us = st->src[st->hal->dev[DEV_AUX]->src].period_us_src;
1258 delay *= 1000; /* ms => us */
1259 if (delay % src_us) {
1269 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1270 dev_info(&st->i2c->dev, "%s-%s aux.delay_hw=%u=>%u\n",
1271 __func__, fn, st->aux.delay_hw, delay);
1272 st->aux.delay_hw = delay;
1273 ret = nvi_wr_i2c_slv4_ctrl(st, (bool)
1274 (st->rc.i2c_slv4_ctrl & BIT_SLV_EN));
1275 /* HW port delay enable */
1276 val = BIT_DELAY_ES_SHADOW;
1277 for (i = 0; i < AUX_PORT_MAX; i++) {
1278 if (st->aux.port[i].nmp.delay_ms)
1281 ret |= nvi_i2c_wr_rc(st, &st->hal->reg->i2c_mst_delay_ctrl, val,
1282 __func__, &st->rc.i2c_mst_delay_ctrl);
1286 static int nvi_timeout(struct nvi_state *st)
1288 bool disabled = true;
1289 unsigned int timeout_us = -1;
1292 /* find the fastest batch timeout of all the enabled devices */
1293 for (i = 0; i < DEV_N_AUX; i++) {
1294 if (st->snsr[i].enable) {
1295 if (st->snsr[i].timeout_us < timeout_us)
1296 timeout_us = st->snsr[i].timeout_us;
1301 disabled = true; /* batch mode is currently disabled */
1303 timeout_us = 0; /* batch mode disabled */
1304 if (timeout_us != st->bm_timeout_us) {
1305 st->bm_timeout_us = timeout_us;
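/* For one rate source, pick the fastest (smallest) period requested by the
 * enabled devices that share it, clamp it to the source's period_us_min/max
 * limits, and record it as period_us_req when it changes.
 */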
1312 static int nvi_period_src(struct nvi_state *st, int src)
1314 bool enabled = false;
1315 unsigned int period_us = -1;
1316 unsigned int dev_msk;
1322 /* find the fastest period of all the enabled devices */
1323 dev_msk = st->hal->src[src].dev_msk;
1324 for (i = 0; dev_msk; i++) {
1325 if (dev_msk & (1 << i)) {
1326 dev_msk &= ~(1 << i);
1327 if (st->snsr[i].enable && st->snsr[i].period_us) {
1328 if (st->snsr[i].period_us < period_us)
1329 period_us = st->snsr[i].period_us;
1336 if (period_us < st->hal->src[src].period_us_min)
1337 period_us = st->hal->src[src].period_us_min;
1338 if (period_us > st->hal->src[src].period_us_max)
1339 period_us = st->hal->src[src].period_us_max;
1340 if (period_us != st->src[src].period_us_req) {
1341 st->src[src].period_us_req = period_us;
1349 int nvi_period_aux(struct nvi_state *st)
1351 bool enabled = false;
1352 unsigned int period_us = -1;
1353 unsigned int timeout_us = -1;
1354 unsigned int msk_en;
1358 msk_en = st->snsr[DEV_AUX].enable | st->aux.dmp_en_msk;
1359 for (i = 0; msk_en; i++) {
1360 if (msk_en & (1 << i)) {
1361 msk_en &= ~(1 << i);
1362 if (st->aux.port[i].period_us) {
1363 if (st->aux.port[i].period_us < period_us)
1364 period_us = st->aux.port[i].period_us;
1365 if (st->aux.port[i].timeout_us < timeout_us)
1367 st->aux.port[i].timeout_us;
1374 st->snsr[DEV_AUX].period_us = period_us;
1375 st->snsr[DEV_AUX].timeout_us = timeout_us;
1377 ret = nvi_period_src(st, st->hal->dev[DEV_AUX]->src);
1378 ret |= nvi_timeout(st);
1382 static int nvi_period_all(struct nvi_state *st)
1387 for (src = 0; src < st->hal->src_n; src++) {
1388 if (st->hal->src[src].dev_msk & (1 << DEV_AUX))
1389 continue; /* run nvi_period_aux last for timeout */
1391 ret |= nvi_period_src(st, src);
1394 ret |= nvi_period_aux(st);
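/* Central reprogramming path for enable/timing changes: pick the power
 * level (ON_FULL when the gyro is on), quiesce interrupts and user_ctrl,
 * choose between DMP and register mode based on FW_LOADED, batching and
 * which enabled sensors the DMP supports, program each rate source and
 * device, then restore power via NVI_PM_AUTO and reset the FIFOs if the
 * part is left above the ON_CYCLE power state.
 */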
1398 static int nvi_en(struct nvi_state *st)
1400 bool dmp_en = false;
1406 if (st->snsr[DEV_GYR].enable) {
1407 ret_t = nvi_pm(st, __func__, NVI_PM_ON_FULL);
1411 for (i = 0; i < DEV_N_AUX; i++) {
1412 if (st->snsr[i].enable) {
1413 ret_t = nvi_pm(st, __func__, NVI_PM_ON);
1420 return nvi_pm(st, __func__, NVI_PM_AUTO);
1423 ret_t |= nvi_int_able(st, __func__, false);
1424 ret_t |= nvi_user_ctrl_en(st, __func__, false, false, false, false);
1426 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1427 dev_err(&st->i2c->dev, "%s en_msk=%x ERR=%d\n",
1428 __func__, st->en_msk, ret_t);
1432 if (st->en_msk & (1 << FW_LOADED)) {
1433 /* test if batch is needed or more specifically that an
1434 * enabled sensor doesn't support batch. The DMP can't
1435 * do batch and non-batch at the same time.
1437 if (st->bm_timeout_us) {
1440 /* batch disabled - test if a DMP sensor is enabled */
1441 for (i = 0; i < DEV_N_AUX; i++) {
1442 if (st->dmp_en_msk & (1 << i)) {
1443 if (st->snsr[i].enable) {
1452 ret_t |= st->hal->dmp->fn_en(st); /* nvi_dmp_en */
1453 st->en_msk |= (1 << DEV_DMP);
1455 /* reprogram for non-DMP mode below */
1457 if (st->sts & (NVS_STS_SPEW_MSG |
1459 dev_err(&st->i2c->dev,
1463 if (st->sts & (NVS_STS_SPEW_MSG |
1465 dev_info(&st->i2c->dev,
1466 "%s DMP enabled\n", __func__);
1471 if (st->en_msk & (1 << DEV_DMP)) {
1472 st->en_msk &= ~(MSK_DEV_SNSR | (1 << DEV_DMP));
1473 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1474 dev_info(&st->i2c->dev,
1475 "%s DMP disabled\n", __func__);
1476 if (st->aux.dmp_en_msk) {
1477 st->aux.dmp_en_msk = 0;
1478 nvi_aux_enable(st, __func__, true, true);
1480 for (i = 0; i < DEV_N_AUX; i++)
1481 st->snsr[i].odr = 0;
1483 for (i = 0; i < AUX_PORT_MAX; i++)
1484 st->aux.port[i].odr = 0;
1487 for (i = 0; i < st->hal->src_n; i++)
1488 ret_t |= st->hal->src[i].fn_period(st);
1490 if (st->snsr[DEV_ACC].enable) {
1491 ret = st->hal->fn->en_acc(st);
1494 st->en_msk &= ~(1 << DEV_ACC);
1496 st->en_msk |= (1 << DEV_ACC);
1499 if (st->snsr[DEV_GYR].enable) {
1500 ret = st->hal->fn->en_gyr(st);
1503 st->en_msk &= ~(1 << DEV_GYR);
1505 st->en_msk |= (1 << DEV_GYR);
1509 /* NVI_PM_AUTO to go to NVI_PM_ON_CYCLE if need be */
1510 /* this also restores correct PM mode if error */
1511 ret_t |= nvi_pm(st, __func__, NVI_PM_AUTO);
1512 if (st->pm > NVI_PM_ON_CYCLE)
1513 ret_t |= nvi_reset(st, __func__, true, false, true);
1515 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1516 dev_info(&st->i2c->dev, "%s en_msk=%x err=%d\n",
1517 __func__, st->en_msk, ret_t);
1521 static void nvi_aux_dbg(struct nvi_state *st, char *tag, int val)
1523 struct nvi_mpu_port *n;
1525 struct aux_ports *a;
1530 if (!(st->sts & NVI_DBG_SPEW_AUX))
1533 dev_info(&st->i2c->dev, "%s %s %d\n", __func__, tag, val);
1535 for (i = 0; i < AUX_PORT_IO; i++) {
1536 ret = nvi_i2c_rd(st, &st->hal->reg->i2c_slv_addr[i], &data[0]);
1537 ret |= nvi_i2c_rd(st, &st->hal->reg->i2c_slv_reg[i], &data[1]);
1538 ret |= nvi_i2c_rd(st, &st->hal->reg->i2c_slv_ctrl[i],
1540 ret |= nvi_i2c_rd(st, &st->hal->reg->i2c_slv_do[i], &data[3]);
1543 pr_info("HW: ERR=%d\n", ret);
1545 pr_info("HW: P%d AD=%x RG=%x CL=%x DO=%x\n",
1546 i, data[0], data[1], data[2], data[3]);
/* HC = hardware register cache (st->rc) */
1548 pr_info("HC: P%d AD=%x RG=%x CL=%x DO=%x\n",
1549 i, st->rc.i2c_slv_addr[i], st->rc.i2c_slv_reg[i],
1550 st->rc.i2c_slv_ctrl[i], st->rc.i2c_slv_do[i]);
1551 n = &st->aux.port[i].nmp;
1552 /* NS = nmp structure */
1553 pr_info("NS: P%d AD=%x RG=%x CL=%x DO=%x MS=%u US=%u SB=%x\n",
1554 i, n->addr, n->reg, n->ctrl, n->data_out, n->delay_ms,
1555 st->aux.port[i].period_us, n->shutdown_bypass);
1556 p = &st->aux.port[i];
1557 /* PS = port structure */
1558 pr_info("PS: P%d OFFSET=%u DMP_CTRL=%x EN=%x HWDOUT=%x\n",
1559 i, p->ext_data_offset, !!(a->dmp_ctrl_msk & (1 << i)),
1560 !!(st->snsr[DEV_AUX].enable & (1 << i)), p->hw_do);
1563 pr_info("AUX: EN=%x MEN=%x DEN=%x DLY=%x SRC=%u DN=%u BEN=%x BLK=%d\n",
1564 !!(st->en_msk & (1 << DEV_AUX)),
1565 !!(st->rc.user_ctrl & BIT_I2C_MST_EN), st->aux.dmp_en_msk,
1566 (st->rc.i2c_slv4_ctrl & BITS_I2C_MST_DLY),
1567 st->src[st->hal->dev[DEV_AUX]->src].period_us_src,
1568 a->ext_data_n, (st->rc.int_pin_cfg & BIT_BYPASS_EN),
1572 static void nvi_aux_ext_data_offset(struct nvi_state *st)
1575 unsigned int offset = 0;
1577 for (i = 0; i < AUX_PORT_IO; i++) {
1578 if (st->aux.port[i].nmp.addr & BIT_I2C_READ) {
1579 st->aux.port[i].ext_data_offset = offset;
1580 offset += (st->rc.i2c_slv_ctrl[i] &
1581 BITS_I2C_SLV_CTRL_LEN);
1584 if (offset > AUX_EXT_DATA_REG_MAX) {
1585 offset = AUX_EXT_DATA_REG_MAX;
1586 dev_err(&st->i2c->dev,
1587 "%s ERR MPU slaves exceed data storage\n", __func__);
1589 st->aux.ext_data_n = offset;
1593 static int nvi_aux_port_data_out(struct nvi_state *st,
1594 int port, u8 data_out)
1598 ret = nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv_do[port], data_out,
1599 NULL, &st->rc.i2c_slv_do[port]);
1601 st->aux.port[port].nmp.data_out = data_out;
1602 st->aux.port[port].hw_do = true;
1604 st->aux.port[port].hw_do = false;
1609 static int nvi_aux_port_wr(struct nvi_state *st, int port)
1611 struct aux_port *ap;
1614 ap = &st->aux.port[port];
1615 ret = nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv_addr[port],
1616 ap->nmp.addr, __func__, &st->rc.i2c_slv_addr[port]);
1617 ret |= nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv_reg[port], ap->nmp.reg,
1618 __func__, &st->rc.i2c_slv_reg[port]);
1619 ret |= nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv_do[port],
1620 ap->nmp.data_out, __func__, &st->rc.i2c_slv_do[port]);
1624 static int nvi_aux_port_en(struct nvi_state *st, int port, bool en)
1626 struct aux_port *ap;
1629 unsigned int dmp_ctrl_msk;
1632 ap = &st->aux.port[port];
1633 if (en && !st->rc.i2c_slv_addr[port]) {
1634 ret = nvi_aux_port_wr(st, port);
1638 if (en && !ap->hw_do)
1639 nvi_aux_port_data_out(st, port, ap->nmp.data_out);
1640 if (port == AUX_PORT_IO) {
1641 ret = nvi_wr_i2c_slv4_ctrl(st, en);
1643 slv_ctrl = st->rc.i2c_slv_ctrl[port];
1645 dmp_ctrl_msk = st->aux.dmp_ctrl_msk;
1646 if (st->en_msk & (1 << DEV_DMP)) {
1647 val = ap->nmp.dmp_ctrl | BIT_SLV_EN;
1648 st->aux.dmp_ctrl_msk |= (1 << port);
1650 val = ap->nmp.ctrl | BIT_SLV_EN;
1651 st->aux.dmp_ctrl_msk &= ~(1 << port);
1653 if (ap->nmp.dmp_ctrl != ap->nmp.ctrl && dmp_ctrl_msk !=
1654 st->aux.dmp_ctrl_msk)
1655 /* AUX HW needs to be reset if slv_ctrl values
1656 * change other than enable bit.
1658 st->aux.reset_i2c = true;
1661 st->aux.dmp_ctrl_msk &= ~(1 << port);
1663 ret = nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv_ctrl[port], val,
1664 __func__, &st->rc.i2c_slv_ctrl[port]);
1665 if (slv_ctrl != st->rc.i2c_slv_ctrl[port])
1666 nvi_aux_ext_data_offset(st);
1671 int nvi_aux_enable(struct nvi_state *st, const char *fn,
1672 bool en_req, bool force)
1674 bool enable = en_req;
1675 bool enabled = false;
1677 unsigned int msk_en;
1681 if (st->rc.int_pin_cfg & BIT_BYPASS_EN)
1683 /* global enable is honored only if a port is enabled */
1684 msk_en = st->snsr[DEV_AUX].enable | st->aux.dmp_en_msk;
1687 if (st->en_msk & (1 << DEV_AUX))
1689 if (force || enable != enabled) {
1691 st->en_msk |= (1 << DEV_AUX);
1692 for (i = 0; i < AUX_PORT_MAX; i++) {
1693 if (msk_en & (1 << i))
1697 ret |= nvi_aux_port_en(st, i, en);
1700 st->en_msk &= ~(1 << DEV_AUX);
1701 for (i = 0; i < AUX_PORT_MAX; i++) {
1702 if (st->rc.i2c_slv_addr[i])
1703 nvi_aux_port_en(st, i, false);
1706 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG |
1708 dev_info(&st->i2c->dev,
1709 "%s-%s en_req=%x enabled: %x->%x err=%d\n",
1710 __func__, fn, en_req, enabled, enable, ret);
1715 static int nvi_aux_port_enable(struct nvi_state *st,
1716 unsigned int port_mask, bool en)
1718 unsigned int enabled;
1722 enabled = st->snsr[DEV_AUX].enable;
1724 st->snsr[DEV_AUX].enable |= port_mask;
1726 st->snsr[DEV_AUX].enable &= ~port_mask;
1727 if (enabled == st->snsr[DEV_AUX].enable)
1730 if (st->hal->dev[DEV_AUX]->fifo_en_msk) {
1732 for (i = 0; i < AUX_PORT_IO; i++) {
1733 if (port_mask & (1 << i)) {
1734 if (st->aux.port[i].nmp.addr & BIT_I2C_READ)
1735 st->aux.reset_fifo = true;
1739 if (en && (st->rc.int_pin_cfg & BIT_BYPASS_EN))
1743 for (i = 0; i < AUX_PORT_MAX; i++) {
1744 if (port_mask & (1 << i))
1745 ret |= nvi_aux_port_en(st, i, en);
1747 ret |= nvi_aux_enable(st, __func__, true, false);
1749 if (port_mask & ((1 << AUX_PORT_IO) - 1))
1754 static int nvi_aux_port_free(struct nvi_state *st, int port)
1756 memset(&st->aux.port[port], 0, sizeof(struct aux_port));
1757 st->snsr[DEV_AUX].enable &= ~(1 << port);
1758 st->aux.dmp_en_msk &= ~(1 << port);
1759 if (st->rc.i2c_slv_addr[port]) {
1760 nvi_aux_port_wr(st, port);
1761 nvi_aux_port_en(st, port, false);
1762 nvi_aux_enable(st, __func__, false, false);
1763 nvi_user_ctrl_en(st, __func__, false, false, false, false);
1764 nvi_aux_enable(st, __func__, true, false);
1765 if (port != AUX_PORT_IO)
1766 st->aux.reset_i2c = true;
1773 static int nvi_aux_port_alloc(struct nvi_state *st,
1774 struct nvi_mpu_port *nmp, int port)
1778 if (st->aux.reset_i2c)
1779 nvi_reset(st, __func__, false, true, true);
1781 for (i = 0; i < AUX_PORT_IO; i++) {
1782 if (st->aux.port[i].nmp.addr == 0)
1785 if (i == AUX_PORT_IO)
1788 if (st->aux.port[port].nmp.addr == 0)
1794 memset(&st->aux.port[i], 0, sizeof(struct aux_port));
1795 memcpy(&st->aux.port[i].nmp, nmp, sizeof(struct nvi_mpu_port));
1796 if (!st->aux.port[i].nmp.dmp_ctrl)
1797 st->aux.port[i].nmp.dmp_ctrl = st->aux.port[i].nmp.ctrl;
1798 st->aux.port[i].period_us = st->aux.port[i].nmp.delay_us;
1802 static int nvi_aux_bypass_enable(struct nvi_state *st, bool en)
1807 if (en && (st->rc.int_pin_cfg & BIT_BYPASS_EN))
1810 val = st->rc.int_pin_cfg;
1812 ret = nvi_aux_enable(st, __func__, false, false);
1813 ret |= nvi_user_ctrl_en(st, __func__,
1814 false, false, false, false);
1816 val |= BIT_BYPASS_EN;
1817 ret = nvi_i2c_wr_rc(st, &st->hal->reg->int_pin_cfg,
1818 val, __func__, &st->rc.int_pin_cfg);
1821 val &= ~BIT_BYPASS_EN;
1822 ret = nvi_i2c_wr_rc(st, &st->hal->reg->int_pin_cfg, val,
1823 __func__, &st->rc.int_pin_cfg);
1825 nvi_aux_enable(st, __func__, true, false);
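/* Bypass requests are reference counted: when the pass-through mux already
 * matches the request only bypass_lock and the timestamp are bumped;
 * otherwise an existing lock is only overridden once it is older than
 * bypass_timeout_ms, after which the mux is actually switched and the new
 * lock taken.
 */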
1832 static int nvi_aux_bypass_request(struct nvi_state *st, bool enable)
1838 if ((bool)(st->rc.int_pin_cfg & BIT_BYPASS_EN) == enable) {
1839 st->aux.bypass_timeout_ns = nvs_timestamp();
1840 st->aux.bypass_lock++;
1841 if (!st->aux.bypass_lock)
1842 dev_err(&st->i2c->dev, "%s rollover ERR\n", __func__);
1844 if (st->aux.bypass_lock) {
1845 ns = nvs_timestamp() - st->aux.bypass_timeout_ns;
1846 to = st->bypass_timeout_ms * 1000000;
1848 st->aux.bypass_lock = 0;
1852 if (!st->aux.bypass_lock) {
1853 ret = nvi_aux_bypass_enable(st, enable);
1855 dev_err(&st->i2c->dev, "%s ERR=%d\n",
1858 st->aux.bypass_lock++;
1864 static int nvi_aux_bypass_release(struct nvi_state *st)
1868 if (st->aux.bypass_lock)
1869 st->aux.bypass_lock--;
1870 if (!st->aux.bypass_lock) {
1871 ret = nvi_aux_bypass_enable(st, false);
1873 dev_err(&st->i2c->dev, "%s ERR=%d\n", __func__, ret);
1878 static int nvi_aux_dev_valid(struct nvi_state *st,
1879 struct nvi_mpu_port *nmp, u8 *data)
1885 /* turn off bypass */
1886 ret = nvi_aux_bypass_request(st, false);
1890 /* grab the special port */
1891 ret = nvi_aux_port_alloc(st, nmp, AUX_PORT_IO);
1892 if (ret != AUX_PORT_IO) {
1893 nvi_aux_bypass_release(st);
1897 /* enable it at fastest speed */
1898 st->aux.port[AUX_PORT_IO].nmp.delay_ms = 0;
1899 st->aux.port[AUX_PORT_IO].period_us =
1900 st->hal->src[st->hal->dev[DEV_AUX]->src].period_us_min;
1901 ret = nvi_user_ctrl_en(st, __func__, false, false, false, false);
1902 ret |= nvi_aux_port_enable(st, 1 << AUX_PORT_IO, true);
1903 ret |= nvi_user_ctrl_en(st, __func__, false, false, true, false);
1905 nvi_aux_port_free(st, AUX_PORT_IO);
1906 nvi_aux_bypass_release(st);
1910 /* now turn off all the other ports for fastest response */
1911 for (i = 0; i < AUX_PORT_IO; i++) {
1912 if (st->rc.i2c_slv_addr[i])
1913 nvi_aux_port_en(st, i, false);
1915 /* start reading the results */
1916 for (i = 0; i < AUX_DEV_VALID_READ_LOOP_MAX; i++) {
1917 mdelay(AUX_DEV_VALID_READ_DELAY_MS);
1919 ret = nvi_i2c_rd(st, &st->hal->reg->i2c_mst_status, &val);
1926 /* these will restore all previously disabled ports */
1927 nvi_aux_bypass_release(st);
1928 nvi_aux_port_free(st, AUX_PORT_IO);
1929 if (i >= AUX_DEV_VALID_READ_LOOP_MAX)
1932 if (val & 0x10) /* NACK */
1935 if (nmp->addr & BIT_I2C_READ) {
1936 ret = nvi_i2c_rd(st, &st->hal->reg->i2c_slv4_di, &val);
1941 dev_info(&st->i2c->dev, "%s MPU read 0x%x from device 0x%x\n",
1942 __func__, val, (nmp->addr & ~BIT_I2C_READ));
1944 dev_info(&st->i2c->dev, "%s MPU found device 0x%x\n",
1945 __func__, (nmp->addr & ~BIT_I2C_READ));
1950 static int nvi_aux_mpu_call_pre(struct nvi_state *st, int port)
1952 if ((port < 0) || (port >= AUX_PORT_IO))
1955 if (st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND))
1958 if (!st->aux.port[port].nmp.addr)
1964 static int nvi_aux_mpu_call_post(struct nvi_state *st,
1969 nvi_aux_dbg(st, tag, ret);
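/* Minimal usage sketch for an external slave driver on the MPU auxiliary
 * I2C bus (the names and values below are illustrative only and are not
 * defined in this file):
 *
 *	struct nvi_mpu_port nmp = {
 *		.addr = 0x0C | BIT_I2C_READ,
 *		.reg = 0x03,
 *		.ctrl = 6,
 *		.delay_ms = 10,
 *		.handler = my_handler,
 *		.ext_driver = my_state,
 *	};
 *	port = nvi_mpu_port_alloc(&nmp, -1);
 *	if (port >= 0)
 *		err = nvi_mpu_enable(1 << port, true);
 *
 * .ctrl carries the transfer length in BITS_I2C_SLV_CTRL_LEN and the
 * handler is called back as handler(data, len, ts, ext_driver); a negative
 * port number is assumed here to request any free port.
 */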
1973 /* See the mpu.h file for details on the nvi_mpu_ calls.
1975 int nvi_mpu_dev_valid(struct nvi_mpu_port *nmp, u8 *data)
1977 struct nvi_state *st = nvi_state_local;
1981 if (st->sts & NVI_DBG_SPEW_AUX)
1982 pr_info("%s\n", __func__);
1984 pr_debug("%s ERR -EAGAIN\n", __func__);
1991 if ((nmp->addr & BIT_I2C_READ) && (data == NULL))
1995 if (!(st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND))) {
1996 nvi_pm(st, __func__, NVI_PM_ON);
1997 ret = nvi_aux_dev_valid(st, nmp, data);
1998 nvi_pm(st, __func__, NVI_PM_AUTO);
1999 nvi_aux_dbg(st, "nvi_mpu_dev_valid=", ret);
2001 nvi_mutex_unlock(st);
2004 EXPORT_SYMBOL(nvi_mpu_dev_valid);
2006 int nvi_mpu_port_alloc(struct nvi_mpu_port *nmp, int port)
2008 struct nvi_state *st = nvi_state_local;
2012 if (st->sts & NVI_DBG_SPEW_AUX)
2013 pr_info("%s\n", __func__);
2015 pr_debug("%s ERR -EAGAIN\n", __func__);
2019 if (nmp == NULL || !(nmp->ctrl & BITS_I2C_SLV_CTRL_LEN))
2022 if (port >= AUX_PORT_IO)
2026 if (!(st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND))) {
2027 nvi_pm(st, __func__, NVI_PM_ON);
2028 ret = nvi_aux_port_alloc(st, nmp, port);
2029 if (ret >= 0 && st->hal->dmp)
2030 /* need to reinitialize DMP for new device */
2031 st->hal->dmp->fn_init(st);
2032 nvi_pm(st, __func__, NVI_PM_AUTO);
2033 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_port_alloc=", ret);
2035 nvi_mutex_unlock(st);
2038 EXPORT_SYMBOL(nvi_mpu_port_alloc);
2040 int nvi_mpu_port_free(int port)
2042 struct nvi_state *st = nvi_state_local;
2046 if (st->sts & NVI_DBG_SPEW_AUX)
2047 pr_info("%s port %d\n", __func__, port);
2049 pr_debug("%s port %d ERR -EAGAIN\n", __func__, port);
2054 ret = nvi_aux_mpu_call_pre(st, port);
2056 nvi_pm(st, __func__, NVI_PM_ON);
2057 ret = nvi_aux_port_free(st, port);
2058 nvi_pm(st, __func__, NVI_PM_AUTO);
2059 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_port_free=", ret);
2061 nvi_mutex_unlock(st);
2064 EXPORT_SYMBOL(nvi_mpu_port_free);
2066 int nvi_mpu_enable(unsigned int port_mask, bool enable)
2068 struct nvi_state *st = nvi_state_local;
2073 if (st->sts & NVI_DBG_SPEW_AUX)
2074 pr_info("%s port_mask %x: %x\n",
2075 __func__, port_mask, enable);
2077 pr_debug("%s port_mask %x: %x ERR -EAGAIN\n",
2078 __func__, port_mask, enable);
2082 if (port_mask >= (1 << AUX_PORT_IO) || !port_mask)
2085 for (i = 0; i < AUX_PORT_IO; i++) {
2086 if (port_mask & (1 << i)) {
2087 if (!st->aux.port[i].nmp.addr)
2093 if (st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND)) {
2096 nvi_pm(st, __func__, NVI_PM_ON);
2097 ret = nvi_aux_port_enable(st, port_mask, enable);
2098 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_enable=", ret);
2100 nvi_mutex_unlock(st);
2103 EXPORT_SYMBOL(nvi_mpu_enable);
2105 int nvi_mpu_delay_ms(int port, u8 delay_ms)
2107 struct nvi_state *st = nvi_state_local;
2111 if (st->sts & NVI_DBG_SPEW_AUX)
2112 pr_info("%s port %d: %u\n", __func__, port, delay_ms);
2114 pr_debug("%s port %d: %u ERR -EAGAIN\n",
2115 __func__, port, delay_ms);
2120 ret = nvi_aux_mpu_call_pre(st, port);
2122 st->aux.port[port].nmp.delay_ms = delay_ms;
2123 if (st->rc.i2c_slv_ctrl[port] & BIT_SLV_EN)
2124 ret = nvi_aux_delay(st, __func__);
2125 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_delay_ms=", ret);
2127 nvi_mutex_unlock(st);
2130 EXPORT_SYMBOL(nvi_mpu_delay_ms);
2132 int nvi_mpu_data_out(int port, u8 data_out)
2134 struct nvi_state *st = nvi_state_local;
2140 ret = nvi_aux_mpu_call_pre(st, port);
2142 if (st->rc.i2c_slv_ctrl[port] & BIT_SLV_EN) {
2143 ret = nvi_aux_port_data_out(st, port, data_out);
2145 st->aux.port[port].nmp.data_out = data_out;
2146 st->aux.port[port].hw_do = false;
2153 EXPORT_SYMBOL(nvi_mpu_data_out);
2155 int nvi_mpu_batch(int port, unsigned int period_us, unsigned int timeout_us)
2157 struct nvi_state *st = nvi_state_local;
2161 if (st->sts & NVI_DBG_SPEW_AUX)
2162 pr_info("%s port %d: p=%u t=%u\n",
2163 __func__, port, period_us, timeout_us);
2165 pr_debug("%s port %d: p=%u t=%u ERR -EAGAIN\n",
2166 __func__, port, period_us, timeout_us);
2171 ret = nvi_aux_mpu_call_pre(st, port);
2173 if (timeout_us && ((st->aux.port[port].nmp.id == ID_INVALID) ||
2174 (st->aux.port[port].nmp.id >= ID_INVALID_END))) {
2175 /* sensor not supported by DMP */
2178 st->aux.port[port].period_us = period_us;
2179 st->aux.port[port].timeout_us = timeout_us;
2180 ret = nvi_period_aux(st);
2181 if (st->en_msk & (1 << DEV_DMP) &&
2182 st->hal->dmp->fn_dev_batch) {
2183 /* batch can be done real-time with DMP on */
2185 ret = st->hal->dmp->fn_dev_batch(st, DEV_AUX,
2189 /* timings changed */
2192 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_batch=", ret);
2195 nvi_mutex_unlock(st);
2198 EXPORT_SYMBOL(nvi_mpu_batch);
2200 int nvi_mpu_flush(int port)
2202 struct nvi_state *st = nvi_state_local;
2206 if (st->sts & NVI_DBG_SPEW_AUX)
2207 pr_info("%s port %d\n", __func__, port);
2209 pr_debug("%s port %d ERR -EAGAIN\n", __func__, port);
2214 ret = nvi_aux_mpu_call_pre(st, port);
2216 if (st->hal->dev[DEV_AUX]->fifo_en_msk) {
2217 /* HW flush only when FIFO is used for AUX */
2218 st->aux.port[port].flush = true;
2219 ret = nvi_read(st, true);
2221 nvi_flush_aux(st, port);
2223 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_flush=", ret);
2225 nvi_mutex_unlock(st);
2228 EXPORT_SYMBOL(nvi_mpu_flush);
2230 int nvi_mpu_fifo(int port, unsigned int *reserve, unsigned int *max)
2232 struct nvi_state *st = nvi_state_local;
2236 if (st->sts & NVI_DBG_SPEW_AUX)
2237 pr_info("%s port %d\n", __func__, port);
2239 pr_debug("%s port %d ERR -EAGAIN\n", __func__, port);
2244 ret = nvi_aux_mpu_call_pre(st, port);
2246 if ((st->aux.port[port].nmp.id != ID_INVALID) &&
2247 (st->aux.port[port].nmp.id < ID_INVALID_END)) {
2249 /* batch not supported at this time */
2252 /* batch not supported at this time */
2254 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_fifo=", 0);
2259 nvi_mutex_unlock(st);
2262 EXPORT_SYMBOL(nvi_mpu_fifo);
2264 int nvi_mpu_bypass_request(bool enable)
2266 struct nvi_state *st = nvi_state_local;
2270 if (st->sts & NVI_DBG_SPEW_AUX)
2271 pr_info("%s enable=%x\n", __func__, enable);
2273 pr_debug("%s ERR -EAGAIN\n", __func__);
2278 if (!(st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND))) {
2279 nvi_pm(st, __func__, NVI_PM_ON);
2280 ret = nvi_aux_bypass_request(st, enable);
2281 nvi_pm(st, __func__, NVI_PM_AUTO);
2282 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_bypass_request=",
2285 nvi_mutex_unlock(st);
2288 EXPORT_SYMBOL(nvi_mpu_bypass_request);
2290 int nvi_mpu_bypass_release(void)
2292 struct nvi_state *st = nvi_state_local;
2295 if (st->sts & NVI_DBG_SPEW_AUX)
2296 pr_info("%s\n", __func__);
2298 pr_debug("%s\n", __func__);
2303 if (!(st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND))) {
2304 nvi_pm(st, __func__, NVI_PM_ON);
2305 nvi_aux_bypass_release(st);
2306 nvi_pm(st, __func__, NVI_PM_AUTO);
2307 nvi_aux_mpu_call_post(st, "nvi_mpu_bypass_release", 0);
2309 nvi_mutex_unlock(st);
2312 EXPORT_SYMBOL(nvi_mpu_bypass_release);
2315 int nvi_reset(struct nvi_state *st, const char *fn,
2316 bool rst_fifo, bool rst_i2c, bool en_irq)
2320 bool rst_dmp = false;
2324 ret = nvi_int_able(st, __func__, false);
2326 if (rst_i2c || st->aux.reset_i2c) {
2327 st->aux.reset_i2c = false;
2329 ret |= nvi_aux_enable(st, __func__, false, false);
2330 val |= BIT_I2C_MST_RST;
2333 st->aux.reset_fifo = false;
2334 val |= BIT_FIFO_RST;
2335 if (st->en_msk & (1 << DEV_DMP)) {
2338 ret |= nvi_aux_enable(st, __func__, false, false);
2341 ret |= nvi_user_ctrl_en(st, __func__,
2342 !rst_fifo, !rst_fifo, !rst_i2c, false);
2343 val |= st->rc.user_ctrl;
2344 ret |= nvi_user_ctrl_rst(st, val);
2345 if (rst_i2c || rst_dmp)
2346 ret |= nvi_aux_enable(st, __func__, true, false);
2347 ts = nvs_timestamp();
2349 for (i = 0; i < st->hal->src_n; i++) {
2350 st->src[i].ts_reset = true;
2351 st->src[i].ts_1st = ts;
2352 st->src[i].ts_end = ts;
2353 st->src[i].ts_period = st->src[i].period_us_src * 1000;
2356 for (i = 0; i < DEV_N_AUX; i++) {
2357 st->snsr[i].ts_reset = true;
2358 st->snsr[i].ts_last = ts;
2359 st->snsr[i].ts_n = 0;
2362 for (i = 0; i < AUX_PORT_MAX; i++) {
2363 st->aux.port[i].ts_reset = true;
2364 st->aux.port[i].ts_last = ts;
2369 ret |= st->hal->dmp->fn_clk_n(st, &st->dmp_clk_n);
2370 st->src[SRC_DMP].ts_reset = true;
2371 st->src[SRC_DMP].ts_1st = ts;
2372 st->src[SRC_DMP].ts_end = ts;
2373 st->src[SRC_DMP].ts_period =
2374 st->src[SRC_DMP].period_us_src * 1000;
2378 ret |= nvi_user_ctrl_en(st, __func__, true, true, true, en_irq);
2379 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG |
2380 NVI_DBG_SPEW_FIFO | NVI_DBG_SPEW_TS))
2381 dev_info(&st->i2c->dev,
2382 "%s-%s DMP=%x FIFO=%x I2C=%x ts=%lld err=%d\n",
2383 __func__, fn, rst_dmp, rst_fifo, rst_i2c, ts, ret);
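/* Synthesize the timestamp for one device (or aux port) sample: the first
 * sample after a reset uses the source's ts_1st, otherwise the previous
 * timestamp is advanced by the source period scaled by the device's odr
 * decimation (odr + 1).  The result is clamped so it never runs ahead of
 * ts_now; samples landing before ts_last or before ts_push_delay are
 * handled by the elided early-out paths.
 */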
2387 s64 nvi_ts_dev(struct nvi_state *st, s64 ts_now,
2388 unsigned int dev, unsigned int aux_port)
2394 if (st->en_msk & (1 << DEV_DMP))
2397 src = st->hal->dev[dev]->src;
2402 ts = nvs_timestamp();
2404 if (dev == DEV_AUX && aux_port < AUX_PORT_MAX) {
2405 if (st->aux.port[aux_port].ts_reset) {
2406 st->aux.port[aux_port].ts_reset = false;
2407 ts = st->src[src].ts_1st;
2409 ts = st->src[src].ts_period;
2410 if (st->aux.port[aux_port].odr)
2411 ts *= (st->aux.port[aux_port].odr + 1);
2412 ts += st->aux.port[aux_port].ts_last;
2415 if (st->snsr[dev].ts_reset) {
2416 st->snsr[dev].ts_reset = false;
2417 ts = st->src[src].ts_1st;
2419 ts = st->src[src].ts_period;
2420 if (st->snsr[dev].odr)
2421 ts *= (st->snsr[dev].odr + 1);
2422 ts += st->snsr[dev].ts_last;
2426 if (st->sts & (NVI_DBG_SPEW_FIFO | NVI_DBG_SPEW_TS))
2427 dev_info(&st->i2c->dev,
2428 "%s ts > ts_now (%lld > %lld)\n",
2429 __func__, ts, ts_now);
2433 if (dev == DEV_AUX && aux_port < AUX_PORT_MAX) {
2434 if (ts < st->aux.port[aux_port].ts_last)
2437 st->aux.port[aux_port].ts_last = ts;
2439 if (ts < st->snsr[dev].ts_last)
2442 st->snsr[dev].ts_last = ts;
2444 if (ts < st->snsr[dev].ts_push_delay)
2446 if (st->sts & NVI_DBG_SPEW_FIFO && src >= 0)
2447 dev_info(&st->i2c->dev,
2448 "src[%d] ts_period=%lld ts_end=%lld %s ts[%u]=%lld\n",
2449 src, st->src[src].ts_period, st->src[src].ts_end,
2450 st->snsr[dev].cfg.name, st->snsr[dev].ts_n, ts);
2451 st->snsr[dev].ts_n++;
2455 static void nvi_aux_rd(struct nvi_state *st)
2459 struct aux_port *ap;
2464 if ((!st->aux.ext_data_n) || (!(st->rc.user_ctrl & BIT_I2C_MST_EN)))
2467 ret = nvi_i2c_r(st, st->hal->reg->ext_sens_data_00.bank,
2468 st->hal->reg->ext_sens_data_00.reg,
2469 st->aux.ext_data_n, (u8 *)&st->aux.ext_data);
2473 ts = nvi_ts_dev(st, 0, DEV_AUX, -1);
2474 for (i = 0; i < AUX_PORT_IO; i++) {
2475 ap = &st->aux.port[i];
2476 if ((st->rc.i2c_slv_ctrl[i] & BIT_SLV_EN) &&
2477 (ap->nmp.addr & BIT_I2C_READ) &&
2478 (ap->nmp.handler != NULL)) {
2479 p = &st->aux.ext_data[ap->ext_data_offset];
2480 len = ap->nmp.ctrl & BITS_I2C_SLV_CTRL_LEN;
2481 ap->nmp.handler(p, len, ts, ap->nmp.ext_driver);
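/* Apply the 3x3 mounting matrix (entries limited to -1/0/1) to one output
 * axis: the result is matrix[0+axis]*x + matrix[3+axis]*y + matrix[6+axis]*z,
 * which lets platform data swap and/or negate axes without multiplies.
 */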
2486 static s32 nvi_matrix(struct nvi_state *st, signed char *matrix,
2487 s32 x, s32 y, s32 z, unsigned int axis)
2489 return ((matrix[0 + axis] == 1 ? x :
2490 (matrix[0 + axis] == -1 ? -x : 0)) +
2491 (matrix[3 + axis] == 1 ? y :
2492 (matrix[3 + axis] == -1 ? -y : 0)) +
2493 (matrix[6 + axis] == 1 ? z :
2494 (matrix[6 + axis] == -1 ? -z : 0)));
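/* Push one sample to the NVS framework: the raw big-endian byte stream is
 * unpacked into sign-extended per-channel values, optionally shifted to the
 * channel size (buf_shft) and run through the mounting matrix, then
 * re-serialized as a little-endian byte stream with any trailing status
 * bytes before being handed to st->nvs->handler() with the timestamp.
 */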
2497 int nvi_push(struct nvi_state *st, unsigned int dev, u8 *buf, s64 ts)
2504 unsigned int buf_le_i;
2511 ch_sz = abs(st->snsr[dev].cfg.ch_sz);
2513 if (st->snsr[dev].buf_n) {
2514 n = st->snsr[dev].buf_n / st->snsr[dev].cfg.ch_n;
2515 m = st->snsr[dev].buf_n % st->snsr[dev].cfg.ch_n;
2521 /* convert big endian byte stream to little endian channel data */
2522 for (ch = 0; ch < st->snsr[dev].cfg.ch_n; ch++) {
2524 if (st->snsr[dev].enable & (1 << ch)) {
2525 if (m && ch == (st->snsr[dev].cfg.ch_n - 1)) {
2526 /* handle last channel misalignment */
2527 for (i = 0; i < m; i++) {
2529 val_le[ch] |= (u8)*buf++;
2531 /* extend sign bit */
2532 i = (sizeof(val_le[ch]) - m) * 8;
2536 for (i = 0; i < n; i++) {
2538 val_le[ch] |= (u8)*buf++;
2540 /* extend sign bit */
2541 i = (sizeof(val_le[ch]) - n) * 8;
2550 /* shift HW data size to channel size if needed */
2551 if (st->snsr[dev].buf_shft) {
2552 if (st->snsr[dev].buf_shft < 0) {
2553 n = abs(st->snsr[dev].buf_shft);
2554 for (ch = 0; ch < st->snsr[dev].cfg.ch_n; ch++)
2557 for (ch = 0; ch < st->snsr[dev].cfg.ch_n; ch++)
2558 val_le[ch] <<= st->snsr[dev].buf_shft;
2562 /* apply matrix if needed */
2563 if (st->snsr[dev].matrix) {
2564 for (ch = 0; ch < AXIS_N; ch++)
2565 val[ch] = val_le[ch];
2567 for (ch = 0; ch < AXIS_N; ch++)
2568 val_le[ch] = nvi_matrix(st, st->snsr[dev].cfg.matrix,
2569 val[AXIS_X], val[AXIS_Y],
2573 /* convert little endian channel data to little endian byte stream */
2575 for (ch = 0; ch < st->snsr[dev].cfg.ch_n; ch++) {
2576 u_val = (u32)val_le[ch];
2577 for (i = 0; i < ch_sz; i++) {
2578 buf_le[buf_le_i + i] = (u8)(u_val & 0xFF);
2584 /* add status if needed (no endian conversion) */
2585 if (buf_le_i < st->snsr[dev].cfg.snsr_data_n) {
2586 n = st->snsr[dev].cfg.snsr_data_n - buf_le_i;
2587 u_val = st->snsr[dev].sts;
2588 for (i = 0; i < n; i++) {
2589 buf_le[buf_le_i + i] = (u8)(u_val & 0xFF);
2595 if (st->sts & (NVI_DBG_SPEW_SNSR << dev)) {
2597 st->sts |= NVS_STS_SPEW_DATA;
2598 st->nvs->handler(st->snsr[dev].nvs_st, buf_le, ts);
2599 if (!(sts & NVS_STS_SPEW_DATA))
2600 st->sts &= ~NVS_STS_SPEW_DATA;
2602 st->nvs->handler(st->snsr[dev].nvs_st, buf_le, ts);
2608 static int nvi_push_event(struct nvi_state *st, unsigned int dev)
2610 s64 ts = nvs_timestamp();
2615 if (st->sts & (NVI_DBG_SPEW_SNSR << dev)) {
2617 st->sts |= NVS_STS_SPEW_DATA;
2618 ret = st->nvs->handler(st->snsr[dev].nvs_st, &val, ts);
2619 if (!(sts & NVS_STS_SPEW_DATA))
2620 st->sts &= ~NVS_STS_SPEW_DATA;
2622 ret = st->nvs->handler(st->snsr[dev].nvs_st, &val, ts);
2627 static int nvi_push_oneshot(struct nvi_state *st, unsigned int dev)
2629 /* disable now to avoid reinitialization on handler's disable */
2630 st->snsr[dev].enable = 0;
2631 st->en_msk &= ~(1 << dev);
2632 return nvi_push_event(st, dev);
2635 static int nvi_dev_rd(struct nvi_state *st, unsigned int dev)
2641 if (!st->snsr[dev].enable)
2644 len = st->snsr[dev].cfg.ch_n << 1;
2645 ret = nvi_i2c_r(st, st->hal->reg->out_h[dev].bank,
2646 st->hal->reg->out_h[dev].reg, len, buf);
2648 ret = nvi_push(st, dev, buf, nvi_ts_dev(st, 0, dev, 0));
2652 static int nvi_fifo_aux(struct nvi_state *st, s64 ts, unsigned int n)
2654 struct aux_port *ap;
2655 unsigned int fifo_data_n;
2658 ts = nvi_ts_dev(st, ts, DEV_AUX, -1);
2659 for (port = 0; port < AUX_PORT_IO; port++) {
2660 ap = &st->aux.port[port];
2661 if (st->rc.fifo_en & (1 << st->hal->bit->slv_fifo_en[port])) {
2662 fifo_data_n = ap->nmp.ctrl & BITS_I2C_SLV_CTRL_LEN;
2663 if (fifo_data_n > n)
2666 ap->nmp.handler(&st->buf[st->buf_i], fifo_data_n, ts,
2667 ap->nmp.ext_driver);
2668 st->buf_i += fifo_data_n;
2671 if (st->sts & (NVS_STS_SUSPEND | NVS_STS_SHUTDOWN))
2678 static int nvi_fifo_dev_rd(struct nvi_state *st, s64 ts, unsigned int n,
2681 if (st->sts & (NVS_STS_SUSPEND | NVS_STS_SHUTDOWN))
2684 if (st->hal->dev[dev]->fifo_data_n > n)
2687 nvi_push(st, dev, &st->buf[st->buf_i], nvi_ts_dev(st, ts, dev, 0));
2688 st->buf_i += st->hal->dev[dev]->fifo_data_n;
2692 static int nvi_fifo_dev(struct nvi_state *st, s64 ts, unsigned int n)
2697 dev = st->hal->fifo_dev[(st->rc.fifo_cfg >> 2) & 0x07];
2699 ret = nvi_fifo_aux(st, ts, n);
2701 ret = nvi_fifo_dev_rd(st, ts, n, dev);
2705 static int nvi_fifo_devs(struct nvi_state *st, s64 ts, unsigned int n)
2710 for (dev = 0; dev < DEV_MPU_N; dev++) {
2711 if (st->rc.fifo_en & st->hal->dev[dev]->fifo_en_msk) {
2712 ret = nvi_fifo_dev_rd(st, ts, n, dev);
2718 if (st->rc.fifo_en & st->hal->dev[DEV_AUX]->fifo_en_msk)
2719 ret = nvi_fifo_aux(st, ts, n);
2723 /* fifo_n_max can be used if we want to round-robin FIFOs */
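/* Common FIFO drain: read fifo_count, derive how many sample periods (ts_n)
 * the data represents (from the DMP clock counter when src < 0, otherwise
 * from fifo_data_n), and extrapolate the end timestamp from the source
 * period.  The IRQ timestamp is used as the end point when it falls within
 * a fraction of a period of now, and the per-source period is re-derived
 * from (ts_now - ts_end) / ts_n when the extrapolation drifts outside the
 * tolerance window.  The FIFO bytes are then read in chunks into st->buf
 * and handed to fn() until consumed.
 */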
2724 static int nvi_fifo_rd(struct nvi_state *st, int src, unsigned int fifo_n_max,
2725 int (*fn)(struct nvi_state *st, s64 ts, unsigned int n))
2734 unsigned int fifo_n;
2738 ts_end = nvs_timestamp();
2741 ret = st->hal->dmp->fn_clk_n(st, &dmp_clk_n);
2742 ret |= nvi_i2c_rd(st, &st->hal->reg->fifo_count_h, (u8 *)&fifo_count);
2743 if (ret || !fifo_count)
2746 ts_now = nvs_timestamp();
2747 if (ts_now < (ts_end + 5000000))
2751 ts_end = atomic64_read(&st->ts_irq);
2752 fifo_n = (unsigned int)be16_to_cpu(fifo_count);
2753 if (st->sts & NVS_STS_SPEW_IRQ)
2754 dev_info(&st->i2c->dev,
2755 "src=%d sync=%x fifo_n=%u ts_clk_n=%u ts_diff=%lld\n",
2756 src, sync, fifo_n, dmp_clk_n, ts_now - st->ts_now);
2757 st->ts_now = ts_now;
2760 if (dmp_clk_n > st->dmp_clk_n)
2761 ts_n = dmp_clk_n - st->dmp_clk_n;
2763 /* counter rolled over */
2764 ts_n = (~st->dmp_clk_n + 1) + dmp_clk_n;
2765 /* ts_n is the number of DMP clock ticks since last time */
2766 st->dmp_clk_n = dmp_clk_n;
2768 fifo_n_max = 0; /* DMP disables round-robin FIFOs */
2771 ts_n = fifo_n / st->src[src].fifo_data_n; /* TS's needed */
2772 if ((fifo_n % st->src[src].fifo_data_n) || !ts_n)
/* reset the FIFO if the count doesn't divide cleanly */
2778 ts_period = st->src[src].period_us_src * 1000;
2779 if (sync && ts_end > st->src[src].ts_end && ts_end < ts_now &&
2780 ts_end > (ts_now - (ts_period >> 2)))
2781 /* ts_irq is within the rate so sync to IRQ */
2783 if (st->src[src].ts_reset) {
2784 st->src[src].ts_reset = false;
2785 ts_end = st->src[src].ts_period * (ts_n - 1);
2787 st->src[src].ts_1st = ts_now - ts_end;
2788 st->src[src].ts_end = st->src[src].ts_1st;
2791 ts_end = st->src[src].ts_period * ts_n;
2793 ts_end += st->src[src].ts_end;
2794 /* ts_now will be sent to nvi_ts_dev where the timestamp is
2795 * prevented from going into the future which allows some
2796 * tolerance here for ts_end being a little more than ts_now.
2797 * The more tolerance we allow, the less often the period must be
2798 * recalculated, which avoids swinging around the true period.
2799 * Plus, the clamp on ts_now in nvi_ts_dev has the benefit of
2800 * "syncing" with the current calculations per device.
2802 if (ts_end > (ts_now + (ts_period >> 3)) || (sync && (ts_end <
2803 (ts_now - (ts_period >> 1))))) {
2804 if (st->sts & (NVI_DBG_SPEW_FIFO | NVI_DBG_SPEW_TS)) {
2805 dev_info(&st->i2c->dev,
2806 "sync=%x now=%lld end=%lld ts_n=%u\n",
2807 sync, ts_now, ts_end, ts_n);
2808 dev_info(&st->i2c->dev,
2809 "src=%d old period=%lld end=%lld\n",
2810 src, st->src[src].ts_period,
2811 st->src[src].ts_end);
2813 /* st->src[src].ts_period needs to be adjusted */
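/* average the time elapsed since the last known-good end timestamp
 * over the ts_n samples in this read
 */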
2814 ts_period = ts_now - st->src[src].ts_end;
2815 do_div(ts_period, ts_n);
2816 st->src[src].ts_period = ts_period;
2817 ts_end = ts_period * ts_n;
2818 ts_end += st->src[src].ts_end;
2819 if (st->sts & (NVI_DBG_SPEW_FIFO | NVI_DBG_SPEW_TS))
2820 dev_info(&st->i2c->dev,
2821 "src=%d new period=%lld end=%lld\n",
2822 src, ts_period, ts_end);
2825 /* would only apply to FIFO timing (non-DMP) */
2826 if (fifo_n_max < fifo_n) {
2827 fifo_n = fifo_n_max;
2828 ts_n = fifo_n / st->src[src].fifo_data_n;
2829 ts_end = st->src[src].ts_period * ts_n;
2830 ts_end += st->src[src].ts_end;
2833 st->src[src].ts_end = ts_end;
2835 /* wasn't able to calculate TS */
2840 buf_n = sizeof(st->buf) - st->buf_i;
2843 ret = nvi_i2c_r(st, st->hal->reg->fifo_rw.bank,
2844 st->hal->reg->fifo_rw.reg,
2845 buf_n, &st->buf[st->buf_i]);
2852 /* fn updates st->buf_i */
2853 while (st->buf_i < buf_n) {
2854 ret = fn(st, ts_now, buf_n - st->buf_i);
2855 /* ret < 0: error to exit
2856 * ret = 0: not enough data to process
2857 * ret > 0: all done processing data
2865 memcpy(st->buf, &st->buf[st->buf_i], buf_n);
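/* Top-level read dispatcher: with the DMP enabled its FIFO is read via
 * the DMP handler; in low-power cycle mode only the accelerometer
 * registers are read; otherwise device registers and the enabled
 * FIFO(s) (multi-FIFO or single-FIFO configuration) are serviced.
 */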
2877 static int nvi_rd(struct nvi_state *st)
2885 if (st->en_msk & (1 << DEV_DMP)) {
2886 if (st->en_msk & ((1 << DEV_SM) | (1 << DEV_STP))) {
2887 ret = nvi_i2c_rd(st, &st->hal->reg->int_dmp, &val);
2888 if (val & (1 << st->hal->bit->dmp_int_sm))
2889 nvi_push_oneshot(st, DEV_SM);
2890 if (val & (1 << st->hal->bit->dmp_int_stp))
2891 nvi_push_event(st, DEV_STP);
2893 if (st->en_msk & st->dmp_en_msk)
2895 return nvi_fifo_rd(st, -1, 0, st->hal->dmp->fn_rd);
2901 if (st->pm == NVI_PM_ON_CYCLE) {
2902 /* only low power accelerometer data */
2903 nvi_pm(st, __func__, NVI_PM_ON);
2904 ret = nvi_dev_rd(st, DEV_ACC);
2905 nvi_pm(st, __func__, NVI_PM_AUTO);
2909 nvi_dev_rd(st, DEV_TMP);
2910 if (!(st->rc.fifo_en & st->hal->dev[DEV_AUX]->fifo_en_msk))
2912 /* handle FIFO enabled data */
2913 if (st->rc.fifo_cfg & 0x01) {
2914 /* multi FIFO enabled */
2915 int_msk = 1 << st->hal->bit->int_data_rdy_0;
2916 for (fifo = 0; fifo < st->hal->fifo_n; fifo++) {
2917 if (st->rc.int_enable & (int_msk << fifo)) {
2918 ret = nvi_wr_fifo_cfg(st, fifo);
2922 src = st->hal->dev[st->hal->
2923 fifo_dev[fifo]]->src;
2924 ret = nvi_fifo_rd(st, src, 0, nvi_fifo_dev);
2925 if (st->buf_i || (ret < 0)) {
2926 /* HW FIFO misalignment - reset */
2933 /* st->fifo_src is either SRC_MPU or the source for the single
2934 * device enabled for the single FIFO in ICM.
2936 ret = nvi_fifo_rd(st, st->fifo_src, 0, nvi_fifo_devs);
2937 if (st->buf_i || (ret < 0)) {
2938 /* HW FIFO misalignment - reset */
2947 static int nvi_read(struct nvi_state *st, bool flush)
2951 if (st->irq_dis && !(st->sts & NVS_STS_SHUTDOWN)) {
2952 dev_err(&st->i2c->dev, "%s ERR: IRQ storm reset. n=%u\n",
2953 __func__, st->irq_storm_n);
2954 st->irq_storm_n = 0;
2955 nvi_pm(st, __func__, NVI_PM_ON);
2956 nvi_wr_pm1(st, __func__, BIT_H_RESET);
2959 } else if (!(st->sts & (NVS_STS_SUSPEND | NVS_STS_SHUTDOWN))) {
2962 nvi_en(st); /* a little harder reset for ICM DMP */
2964 nvi_reset(st, __func__, true, false, true);
2971 static irqreturn_t nvi_thread(int irq, void *dev_id)
2973 struct nvi_state *st = (struct nvi_state *)dev_id;
2976 nvi_read(st, false);
2977 nvi_mutex_unlock(st);
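/* Hard IRQ handler: timestamp the interrupt and detect an IRQ storm by
 * comparing successive interrupt intervals against NVI_IRQ_STORM_MIN_NS;
 * after NVI_IRQ_STORM_MAX_N consecutive short intervals the IRQ is
 * disabled and nvi_read() later resets the part.
 */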
2981 static irqreturn_t nvi_handler(int irq, void *dev_id)
2983 struct nvi_state *st = (struct nvi_state *)dev_id;
2984 u64 ts = nvs_timestamp();
2985 u64 ts_old = atomic64_xchg(&st->ts_irq, ts);
2986 u64 ts_diff = ts - ts_old;
2988 /* test for MPU IRQ storm problem */
2989 if (ts_diff < NVI_IRQ_STORM_MIN_NS) {
2991 if (st->irq_storm_n > NVI_IRQ_STORM_MAX_N)
2992 nvi_disable_irq(st);
2994 st->irq_storm_n = 0;
2997 if (st->sts & NVS_STS_SPEW_IRQ)
2998 dev_info(&st->i2c->dev, "%s ts=%llu ts_diff=%llu irq_dis=%x\n",
2999 __func__, ts, ts_diff, st->irq_dis);
3000 return IRQ_WAKE_THREAD;
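/* The functions below implement the nvs_fn_dev callbacks registered
 * with the NVS framework via nvi_nvs_fn.
 */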
3003 static int nvi_enable(void *client, int snsr_id, int enable)
3005 struct nvi_state *st = (struct nvi_state *)client;
3008 /* return current enable request status */
3009 return st->snsr[snsr_id].enable;
3011 if (st->snsr[snsr_id].enable == enable)
3012 /* nothing has changed with enable request */
3015 st->snsr[snsr_id].enable = enable;
3017 /* officially flagged as off here */
3018 st->en_msk &= ~(1 << snsr_id);
3019 if (st->sts & NVS_STS_SUSPEND)
3020 /* speed up suspend/resume by not doing nvi_en for every dev */
3023 if (snsr_id == DEV_TMP)
3024 /* this is a static sensor that will be read when gyro is on */
3027 if (st->en_msk & (1 << DEV_DMP)) {
3028 /* DMP is currently on */
3029 if (!(st->en_msk & st->dmp_en_msk))
3030 /* DMP may get turned off (may stay on due to batch) so
3031 * we update timings that may have changed while DMP
3036 nvi_period_src(st, st->hal->dev[snsr_id]->src);
3042 static int nvi_batch(void *client, int snsr_id, int flags,
3043 unsigned int period, unsigned int timeout)
3045 struct nvi_state *st = (struct nvi_state *)client;
3048 if (timeout && !st->snsr[snsr_id].cfg.fifo_max_evnt_cnt)
3051 if (snsr_id == DEV_TMP)
3054 if (period == st->snsr[snsr_id].period_us &&
3055 timeout == st->snsr[snsr_id].timeout_us)
3058 st->snsr[snsr_id].period_us = period;
3059 st->snsr[snsr_id].timeout_us = timeout;
3060 if (!st->snsr[snsr_id].enable)
3063 ret = nvi_timeout(st);
3064 if (st->en_msk & (1 << DEV_DMP)) {
3065 if (st->hal->dmp->fn_dev_batch)
3066 /* batch can be done in real-time with the DMP on */
3068 ret = st->hal->dmp->fn_dev_batch(st, snsr_id, -1);
3072 ret |= nvi_period_src(st, st->hal->dev[snsr_id]->src);
3080 static int nvi_flush(void *client, int snsr_id)
3082 struct nvi_state *st = (struct nvi_state *)client;
3085 if (st->snsr[snsr_id].enable) {
3086 st->snsr[snsr_id].flush = true;
3087 ret = nvi_read(st, true);
3092 static int nvi_max_range(void *client, int snsr_id, int max_range)
3094 struct nvi_state *st = (struct nvi_state *)client;
3095 unsigned int i = max_range;
3098 if (snsr_id < 0 || snsr_id >= DEV_N)
3101 if (st->snsr[snsr_id].enable)
3102 /* can't change settings on the fly (disable device first) */
3105 if (i > st->hal->dev[snsr_id]->rr_0n)
3106 /* clamp to highest setting */
3107 i = st->hal->dev[snsr_id]->rr_0n;
3108 st->snsr[snsr_id].usr_cfg = i;
3109 st->snsr[snsr_id].cfg.resolution.ival =
3110 st->hal->dev[snsr_id]->rr[i].resolution.ival;
3111 st->snsr[snsr_id].cfg.resolution.fval =
3112 st->hal->dev[snsr_id]->rr[i].resolution.fval;
3113 st->snsr[snsr_id].cfg.max_range.ival =
3114 st->hal->dev[snsr_id]->rr[i].max_range.ival;
3115 st->snsr[snsr_id].cfg.max_range.fval =
3116 st->hal->dev[snsr_id]->rr[i].max_range.fval;
3117 st->snsr[snsr_id].cfg.offset.ival = st->hal->dev[snsr_id]->offset.ival;
3118 st->snsr[snsr_id].cfg.offset.fval = st->hal->dev[snsr_id]->offset.fval;
3119 st->snsr[snsr_id].cfg.scale.ival = st->hal->dev[snsr_id]->scale.ival;
3120 st->snsr[snsr_id].cfg.scale.fval = st->hal->dev[snsr_id]->scale.fval;
3121 /* AXIS sensors need resolution put in the scales */
3122 if (st->snsr[snsr_id].cfg.ch_n_max) {
3123 for (ch = 0; ch < st->snsr[snsr_id].cfg.ch_n_max; ch++) {
3124 st->snsr[snsr_id].cfg.scales[ch].ival =
3125 st->snsr[snsr_id].cfg.resolution.ival;
3126 st->snsr[snsr_id].cfg.scales[ch].fval =
3127 st->snsr[snsr_id].cfg.resolution.fval;
3134 static int nvi_offset(void *client, int snsr_id, int channel, int offset)
3136 struct nvi_state *st = (struct nvi_state *)client;
3140 if (snsr_id >= DEV_AXIS_N || channel >= AXIS_N)
3143 old = st->dev_offset[snsr_id][channel];
3144 st->dev_offset[snsr_id][channel] = offset;
3145 if (st->en_msk & (1 << snsr_id)) {
3148 st->dev_offset[snsr_id][channel] = old;
3156 static int nvi_thresh_lo(void *client, int snsr_id, int thresh_lo)
3158 struct nvi_state *st = (struct nvi_state *)client;
3166 st->snsr[DEV_SM].cfg.thresh_lo = thresh_lo;
3167 if (st->en_msk & (1 << DEV_DMP))
3168 ret = st->hal->dmp->fn_dev_init(st, snsr_id);
3178 static int nvi_thresh_hi(void *client, int snsr_id, int thresh_hi)
3180 struct nvi_state *st = (struct nvi_state *)client;
3186 st->en_msk |= (1 << EN_LP);
3188 st->en_msk &= ~(1 << EN_LP);
3192 st->snsr[DEV_SM].cfg.thresh_hi = thresh_hi;
3193 if (st->en_msk & (1 << DEV_DMP))
3194 ret = st->hal->dmp->fn_dev_init(st, snsr_id);
3204 static int nvi_reset_dev(void *client, int snsr_id)
3206 struct nvi_state *st = (struct nvi_state *)client;
3209 ret = nvi_pm(st, __func__, NVI_PM_ON);
3210 ret |= nvi_wr_pm1(st, __func__, BIT_H_RESET);
3216 static int nvi_self_test(void *client, int snsr_id, char *buf)
3218 struct nvi_state *st = (struct nvi_state *)client;
3221 nvi_pm(st, __func__, NVI_PM_ON);
3222 nvi_aux_enable(st, __func__, false, false);
3223 nvi_user_ctrl_en(st, __func__, false, false, false, false);
3224 if (snsr_id == DEV_ACC)
3225 ret = st->hal->fn->st_acc(st);
3226 else if (snsr_id == DEV_GYR)
3227 ret = st->hal->fn->st_gyr(st);
3230 nvi_aux_enable(st, __func__, true, false);
3234 return sprintf(buf, "%d FAIL\n", ret);
3236 return sprintf(buf, "%d PASS\n", ret);
3239 static int nvi_regs(void *client, int snsr_id, char *buf)
3241 struct nvi_state *st = (struct nvi_state *)client;
3248 t = sprintf(buf, "registers: (only data != 0 shown)\n");
3249 for (j = 0; j < st->hal->reg_bank_n; j++) {
3250 t += sprintf(buf + t, "bank %u:\n", j);
3251 for (i = 0; i < st->hal->regs_n; i++) {
3252 if ((j == st->hal->reg->fifo_rw.bank) &&
3253 (i == st->hal->reg->fifo_rw.reg))
3256 ret = nvi_i2c_r(st, j, i, 1, &data);
3258 t += sprintf(buf + t, "0x%02x=ERR\n", i);
3260 t += sprintf(buf + t,
3261 "0x%02x=0x%02x\n", i, data);
3267 static int nvi_nvs_write(void *client, int snsr_id, unsigned int nvs)
3269 struct nvi_state *st = (struct nvi_state *)client;
3271 switch (nvs & 0xFF) {
3274 case NVI_INFO_REG_WR:
3275 case NVI_INFO_MEM_RD:
3276 case NVI_INFO_MEM_WR:
3277 case NVI_INFO_DMP_FW:
3278 case NVI_INFO_DMP_EN_MSK:
3281 case NVI_INFO_DBG_SPEW:
3282 st->sts ^= NVI_DBG_SPEW_MSG;
3285 case NVI_INFO_AUX_SPEW:
3286 st->sts ^= NVI_DBG_SPEW_AUX;
3287 nvi_aux_dbg(st, "SNAPSHOT", 0);
3290 case NVI_INFO_FIFO_SPEW:
3291 st->sts ^= NVI_DBG_SPEW_FIFO;
3294 case NVI_INFO_TS_SPEW:
3295 st->sts ^= NVI_DBG_SPEW_TS;
3299 if (nvs < (NVI_INFO_SNSR_SPEW + DEV_N))
3300 st->sts ^= (NVI_DBG_SPEW_SNSR <<
3301 (nvs - NVI_INFO_SNSR_SPEW));
3310 static int nvi_nvs_read(void *client, int snsr_id, char *buf)
3312 struct nvi_state *st = (struct nvi_state *)client;
3321 st->info = NVI_INFO_VER;
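/* The low byte of 'info' selects the command written via
 * nvi_nvs_write() and the upper bytes carry its arguments
 * (e.g. NVI_INFO_REG_WR: bank = info >> 24, reg = info >> 16,
 * data = info >> 8).
 */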
3322 switch (info & 0xFF) {
3324 t = sprintf(buf, "NVI driver v. %u\n", NVI_DRIVER_VERSION);
3325 if (st->en_msk & (1 << FW_LOADED)) {
3326 t += sprintf(buf + t, "DMP FW v. %u\n",
3327 st->hal->dmp->fw_ver);
3328 t += sprintf(buf + t, "DMP enabled=%u\n",
3329 !!(st->en_msk & (1 << DEV_DMP)));
3331 t += sprintf(buf + t, "standby_en=%x\n",
3332 !!(st->en_msk & (1 << EN_STDBY)));
3333 t += sprintf(buf + t, "bypass_timeout_ms=%u\n",
3334 st->bypass_timeout_ms);
3335 for (i = 0; i < DEV_N_AUX; i++) {
3336 if (st->snsr[i].push_delay_ns)
3337 t += sprintf(buf + t,
3338 "%s_push_delay_ns=%lld\n",
3339 st->snsr[i].cfg.name,
3340 st->snsr[i].push_delay_ns);
3343 for (i = 0; i < DEV_N_AUX; i++) {
3344 if ((st->dmp_dev_msk | MSK_DEV_MPU_AUX) & (1 << i)) {
3345 if (st->dmp_en_msk & (1 << i))
3346 t += sprintf(buf + t, "%s_dmp_en=1\n",
3347 st->snsr[i].cfg.name);
3349 t += sprintf(buf + t, "%s_dmp_en=0\n",
3350 st->snsr[i].cfg.name);
3357 t = sprintf(buf, "en_msk=%x\n", st->en_msk);
3358 t += sprintf(buf + t, "sts=%x\n", st->sts);
3359 t += sprintf(buf + t, "pm=%d\n", st->pm);
3360 t += sprintf(buf + t, "bm_timeout_us=%u\n", st->bm_timeout_us);
3361 t += sprintf(buf + t, "fifo_src=%d\n", st->fifo_src);
3362 for (i = 0; i < DEV_N_AUX; i++) {
3363 t += sprintf(buf + t, "snsr[%u] %s:\n",
3364 i, st->snsr[i].cfg.name);
3365 t += sprintf(buf + t, "enable=%x\n",
3366 st->snsr[i].enable);
3367 t += sprintf(buf + t, "period_us=%u\n",
3368 st->snsr[i].period_us);
3369 t += sprintf(buf + t, "timeout_us=%u\n",
3370 st->snsr[i].timeout_us);
3371 t += sprintf(buf + t, "odr=%u\n",
3373 t += sprintf(buf + t, "ts_last=%lld\n",
3374 st->snsr[i].ts_last);
3375 t += sprintf(buf + t, "ts_reset=%x\n",
3376 st->snsr[i].ts_reset);
3377 t += sprintf(buf + t, "flush=%x\n",
3379 t += sprintf(buf + t, "matrix=%x\n",
3380 st->snsr[i].matrix);
3381 t += sprintf(buf + t, "buf_shft=%d\n",
3382 st->snsr[i].buf_shft);
3383 t += sprintf(buf + t, "buf_n=%u\n",
3389 st->hal->dmp->fn_clk_n(st, &n);
3390 t += sprintf(buf + t, "nvi_dmp_clk_n=%u\n", n);
3391 t += sprintf(buf + t, "st->dmp_clk_n=%u\n",
3397 for (i = 0; i < SRC_N; i++) {
3398 if (i >= st->hal->src_n && i != SRC_DMP)
3401 t += sprintf(buf + t, "src[%u]:\n", i);
3402 t += sprintf(buf + t, "ts_reset=%x\n",
3403 st->src[i].ts_reset);
3404 t += sprintf(buf + t, "ts_end=%lld\n",
3406 t += sprintf(buf + t, "ts_period=%lld\n",
3407 st->src[i].ts_period);
3408 t += sprintf(buf + t, "period_us_src=%u\n",
3409 st->src[i].period_us_src);
3410 t += sprintf(buf + t, "period_us_req=%u\n",
3411 st->src[i].period_us_req);
3412 t += sprintf(buf + t, "fifo_data_n=%u\n",
3413 st->src[i].fifo_data_n);
3414 t += sprintf(buf + t, "base_t=%u\n",
3419 case NVI_INFO_DBG_SPEW:
3420 return sprintf(buf, "DBG spew=%x\n",
3421 !!(st->sts & NVI_DBG_SPEW_MSG));
3423 case NVI_INFO_AUX_SPEW:
3424 return sprintf(buf, "AUX spew=%x\n",
3425 !!(st->sts & NVI_DBG_SPEW_AUX));
3427 case NVI_INFO_FIFO_SPEW:
3428 return sprintf(buf, "FIFO spew=%x\n",
3429 !!(st->sts & NVI_DBG_SPEW_FIFO));
3431 case NVI_INFO_TS_SPEW:
3432 return sprintf(buf, "TS spew=%x\n",
3433 !!(st->sts & NVI_DBG_SPEW_TS));
3435 case NVI_INFO_REG_WR:
3437 buf_rw[0] = (u8)(info >> 16);
3438 buf_rw[1] = (u8)(info >> 8);
3439 ret = nvi_i2c_write(st, info >> 24, 2, buf_rw);
3440 return sprintf(buf, "REG WR: b=%02x r=%02x d=%02x ERR=%d\n",
3441 info >> 24, buf_rw[0], buf_rw[1], ret);
3443 case NVI_INFO_MEM_RD:
3444 n = (info >> 8) & 0xFF;
3447 ret = nvi_mem_rd(st, info >> 16, n, buf_rw);
3449 return sprintf(buf, "MEM RD: ERR=%d\n", ret);
3451 t = sprintf(buf, "MEM RD:\n");
3452 for (i = 0; i < n; i++) {
3454 t += sprintf(buf + t, "%04x: ",
3456 t += sprintf(buf + t, "%02x ", buf_rw[i]);
3458 t += sprintf(buf + t, "\n");
3460 t += sprintf(buf + t, "\n");
3463 case NVI_INFO_MEM_WR:
3465 buf_rw[0] = (u8)(info >> 8);
3466 ret = nvi_mem_wr(st, info >> 16, 1, buf_rw, true);
3467 return sprintf(buf, "MEM WR: a=%04x d=%02x ERR=%d\n",
3468 info >> 16, buf_rw[0], ret);
3470 case NVI_INFO_DMP_FW:
3471 ret = nvi_dmp_fw(st);
3472 return sprintf(buf, "DMP FW: ERR=%d\n", ret);
3474 case NVI_INFO_DMP_EN_MSK:
3475 st->dmp_en_msk = (info >> 8) & MSK_DEV_ALL;
3476 return sprintf(buf, "st->dmp_en_msk=%x\n", st->dmp_en_msk);
3479 i = info - NVI_INFO_SNSR_SPEW;
3481 return sprintf(buf, "%s spew=%x\n",
3482 st->snsr[i].cfg.name,
3483 !!(st->sts & (NVI_DBG_SPEW_SNSR << i)));
3490 static struct nvs_fn_dev nvi_nvs_fn = {
3491 .enable = nvi_enable,
3494 .max_range = nvi_max_range,
3495 .offset = nvi_offset,
3496 .thresh_lo = nvi_thresh_lo,
3497 .thresh_hi = nvi_thresh_hi,
3498 .reset = nvi_reset_dev,
3499 .self_test = nvi_self_test,
3501 .nvs_write = nvi_nvs_write,
3502 .nvs_read = nvi_nvs_read,
3506 static int nvi_suspend(struct device *dev)
3508 struct i2c_client *client = to_i2c_client(dev);
3509 struct nvi_state *st = i2c_get_clientdata(client);
3513 s64 ts = 0; /* initialized to avoid a compiler warning */
3515 if (st->sts & NVS_STS_SPEW_MSG)
3516 ts = nvs_timestamp();
3517 st->sts |= NVS_STS_SUSPEND;
3519 for (i = 0; i < DEV_N; i++)
3520 ret_t |= st->nvs->suspend(st->snsr[i].nvs_st);
3524 ret_t |= nvi_en(st);
3525 for (i = 0; i < DEV_N; i++) {
3526 if (st->snsr[i].enable && (st->snsr[i].cfg.flags &
3527 SENSOR_FLAG_WAKE_UP)) {
3528 ret = irq_set_irq_wake(st->i2c->irq, 1);
3530 st->irq_set_irq_wake = true;
3535 if (st->sts & NVS_STS_SPEW_MSG)
3536 dev_info(&client->dev,
3537 "%s WAKE_ON=%x elapsed_t=%lldns err=%d\n", __func__,
3538 st->irq_set_irq_wake, nvs_timestamp() - ts, ret_t);
3539 nvi_mutex_unlock(st);
3543 static int nvi_resume(struct device *dev)
3545 struct i2c_client *client = to_i2c_client(dev);
3546 struct nvi_state *st = i2c_get_clientdata(client);
3547 s64 ts = 0; /* initialized to avoid a compiler warning */
3551 if (st->sts & NVS_STS_SPEW_MSG)
3552 ts = nvs_timestamp();
3554 if (st->irq_set_irq_wake) {
3555 /* determine whether this device was the wake source */
3556 ret = nvi_rd_int_status(st);
3558 dev_err(&client->dev, "%s IRQ STS ERR=%d\n",
3561 if (st->sts & NVS_STS_SPEW_MSG)
3562 dev_info(&client->dev,
3563 "%s IRQ STS=%#x DMP=%#x\n", __func__,
3564 st->rc.int_status, st->rc.int_dmp);
3565 if (st->rc.int_status & (1 << st->hal->bit->int_dmp)) {
3566 if (st->rc.int_dmp &
3567 (1 << st->hal->bit->dmp_int_sm))
3568 nvi_push_oneshot(st, DEV_SM);
3571 ret = irq_set_irq_wake(st->i2c->irq, 0);
3573 st->irq_set_irq_wake = false;
3575 nvi_mutex_unlock(st);
3578 for (i = 0; i < DEV_N; i++)
3579 ret |= st->nvs->resume(st->snsr[i].nvs_st);
3583 for (i = 0; i < AUX_PORT_MAX; i++) {
3584 if (st->aux.port[i].nmp.shutdown_bypass)
3587 if (i < AUX_PORT_MAX) {
3588 nvi_pm(st, __func__, NVI_PM_ON);
3589 nvi_aux_bypass_enable(st, false);
3591 st->sts &= ~NVS_STS_SUSPEND;
3594 if (st->sts & NVS_STS_SPEW_MSG)
3595 dev_info(&client->dev, "%s elapsed_t=%lldns err=%d\n",
3596 __func__, nvs_timestamp() - ts, ret);
3597 nvi_mutex_unlock(st);
3601 static const struct dev_pm_ops nvi_pm_ops = {
3602 .suspend = nvi_suspend,
3603 .resume = nvi_resume,
3606 static void nvi_shutdown(struct i2c_client *client)
3608 struct nvi_state *st = i2c_get_clientdata(client);
3611 st->sts |= NVS_STS_SHUTDOWN;
3613 for (i = 0; i < DEV_N; i++)
3614 st->nvs->shutdown(st->snsr[i].nvs_st);
3616 nvi_disable_irq(st);
3618 nvi_user_ctrl_en(st, __func__, false, false, false, false);
3619 nvi_pm(st, __func__, NVI_PM_OFF);
3621 if (st->sts & NVS_STS_SPEW_MSG)
3622 dev_info(&client->dev, "%s\n", __func__);
3625 static int nvi_remove(struct i2c_client *client)
3627 struct nvi_state *st = i2c_get_clientdata(client);
3631 nvi_shutdown(client);
3633 for (i = 0; i < DEV_N; i++)
3634 st->nvs->remove(st->snsr[i].nvs_st);
3638 dev_info(&client->dev, "%s\n", __func__);
3642 static struct nvi_id_hal nvi_id_hals[] = {
3643 { NVI_HW_ID_AUTO, NVI_NAME, &nvi_hal_6050 },
3644 { NVI_HW_ID_MPU6050, NVI_NAME_MPU6050, &nvi_hal_6050 },
3645 { NVI_HW_ID_MPU6500, NVI_NAME_MPU6500, &nvi_hal_6500 },
3646 { NVI_HW_ID_MPU6515, NVI_NAME_MPU6515, &nvi_hal_6515 },
3647 { NVI_HW_ID_MPU9150, NVI_NAME_MPU9150, &nvi_hal_6050 },
3648 { NVI_HW_ID_MPU9250, NVI_NAME_MPU9250, &nvi_hal_6500 },
3649 { NVI_HW_ID_MPU9350, NVI_NAME_MPU9350, &nvi_hal_6515 },
3650 { NVI_HW_ID_ICM20628, NVI_NAME_ICM20628, &nvi_hal_20628 },
3651 { NVI_HW_ID_ICM20630, NVI_NAME_ICM20630, &nvi_hal_20628 },
3652 { NVI_HW_ID_ICM20632, NVI_NAME_ICM20632, &nvi_hal_20628 },
3655 static int nvi_id2hal(struct nvi_state *st, u8 hw_id)
3659 for (i = 1; i < (int)ARRAY_SIZE(nvi_id_hals); i++) {
3660 if (nvi_id_hals[i].hw_id == hw_id) {
3661 st->hal = nvi_id_hals[i].hal;
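/* Identify the part: an explicit i2c_device_id selects its HAL
 * directly; for NVI_NDX_AUTO the WHO_AM_I register is read (first with
 * the default MPU HAL, then, after a regulator-induced master reset,
 * with the ICM20628 HAL) and matched against nvi_id_hals[].
 */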
3669 static int nvi_id_dev(struct nvi_state *st,
3670 const struct i2c_device_id *i2c_dev_id)
3672 u8 hw_id = NVI_HW_ID_AUTO;
3673 unsigned int i = i2c_dev_id->driver_data;
3678 BUG_ON(NVI_NDX_N != ARRAY_SIZE(nvi_i2c_device_id) - 1);
3679 BUG_ON(NVI_NDX_N != ARRAY_SIZE(nvi_id_hals));
3680 st->hal = nvi_id_hals[i].hal;
3681 if (i == NVI_NDX_AUTO) {
3682 nvi_pm_wr(st, __func__, 0, 0, 0);
3683 ret = nvi_i2c_rd(st, &st->hal->reg->who_am_i, &hw_id);
3685 dev_err(&st->i2c->dev, "%s AUTO ID FAILED\n",
3690 ret = nvi_id2hal(st, hw_id);
3692 st->hal = &nvi_hal_20628;
3693 /* cause a master reset by disabling regulators */
3694 nvs_vregs_disable(&st->i2c->dev, st->vreg,
3695 ARRAY_SIZE(nvi_vregs));
3696 ret = nvi_pm_wr(st, __func__, 0, 0, 0);
3697 ret = nvi_i2c_rd(st, &st->hal->reg->who_am_i, &hw_id);
3699 dev_err(&st->i2c->dev, "%s AUTO ID FAILED\n",
3704 ret = nvi_id2hal(st, hw_id);
3706 dev_err(&st->i2c->dev,
3707 "%s hw_id=%x AUTO ID FAILED\n",
3715 /* cause a master reset by disabling regulators */
3716 nvs_vregs_disable(&st->i2c->dev, st->vreg,
3717 ARRAY_SIZE(nvi_vregs));
3718 nvi_pm_wr(st, __func__, 0, 0, 0);
3721 /* populate the rest of st->snsr[dev].cfg */
3722 for (dev = 0; dev < DEV_N; dev++) {
3723 st->snsr[dev].cfg.part = nvi_id_hals[i].name;
3724 st->snsr[dev].cfg.version = st->hal->dev[dev]->version;
3725 st->snsr[dev].cfg.milliamp.ival =
3726 st->hal->dev[dev]->milliamp.ival;
3727 st->snsr[dev].cfg.milliamp.fval =
3728 st->hal->dev[dev]->milliamp.fval;
3731 #define SRM (SENSOR_FLAG_SPECIAL_REPORTING_MODE)
3732 #define OSM (SENSOR_FLAG_ONE_SHOT_MODE)
3733 BUG_ON(SRC_N < st->hal->src_n);
3734 for (dev = 0; dev < DEV_N; dev++) {
3735 src = st->hal->dev[dev]->src;
3739 BUG_ON(src >= st->hal->src_n);
3740 if ((st->snsr[dev].cfg.flags & SRM) != OSM) {
3741 st->snsr[dev].cfg.delay_us_min =
3742 st->hal->src[src].period_us_min;
3743 st->snsr[dev].cfg.delay_us_max =
3744 st->hal->src[src].period_us_max;
3748 ret = nvs_vregs_sts(st->vreg, ARRAY_SIZE(nvi_vregs));
3750 /* regulators aren't supported so manually do master reset */
3751 nvi_wr_pm1(st, __func__, BIT_H_RESET);
3752 for (i = 0; i < AXIS_N; i++) {
3753 st->rom_offset[DEV_ACC][i] = (s16)st->rc.accel_offset[i];
3754 st->rom_offset[DEV_GYR][i] = (s16)st->rc.gyro_offset[i];
3755 st->dev_offset[DEV_ACC][i] = 0;
3756 st->dev_offset[DEV_GYR][i] = 0;
3758 if (st->hal->fn->init)
3759 ret = st->hal->fn->init(st);
3762 if (hw_id == NVI_HW_ID_AUTO)
3763 dev_info(&st->i2c->dev, "%s: USING DEVICE TREE: %s\n",
3764 __func__, i2c_dev_id->name);
3766 dev_info(&st->i2c->dev, "%s: FOUND HW ID=%x USING: %s\n",
3767 __func__, hw_id, st->snsr[0].cfg.part);
3771 static struct sensor_cfg nvi_cfg_dflt[] = {
3773 .name = "accelerometer",
3779 .vendor = NVI_VENDOR,
3780 .float_significance = NVS_FLOAT_NANO,
3782 .thresh_hi = -1, /* LP */
3785 .name = "gyroscope",
3791 .vendor = NVI_VENDOR,
3795 .float_significance = NVS_FLOAT_NANO,
3799 .name = "gyro_temp",
3800 .snsr_id = SENSOR_TYPE_TEMPERATURE,
3803 .vendor = NVI_VENDOR,
3804 .flags = SENSOR_FLAG_ON_CHANGE_MODE,
3805 .float_significance = NVS_FLOAT_NANO,
3808 .name = "significant_motion",
3812 .vendor = NVI_VENDOR,
3814 /* delay_us_max is ignored by NVS since this is a one-shot
3815 * sensor, so we use it as a third threshold parameter
3817 .delay_us_max = 200, /* SMD_DELAY2_THLD */
3818 .flags = SENSOR_FLAG_ONE_SHOT_MODE |
3819 SENSOR_FLAG_WAKE_UP,
3820 .thresh_lo = 1500, /* SMD_MOT_THLD */
3821 .thresh_hi = 600, /* SMD_DELAY_THLD */
3824 .name = "step_detector",
3828 .vendor = NVI_VENDOR,
3830 .flags = SENSOR_FLAG_ONE_SHOT_MODE,
3833 .name = "quaternion",
3834 .snsr_id = SENSOR_TYPE_ORIENTATION,
3838 .vendor = NVI_VENDOR,
3839 .delay_us_min = 10000,
3840 .delay_us_max = 255000,
3843 .name = "geomagnetic_rotation_vector",
3848 .vendor = NVI_VENDOR,
3849 .delay_us_min = 10000,
3850 .delay_us_max = 255000,
3853 .name = "gyroscope_uncalibrated",
3858 .vendor = NVI_VENDOR,
3859 .delay_us_min = 10000,
3860 .delay_us_max = 255000,
3864 /* device tree parameters before HAL initialized */
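/* Illustrative DT fragment for the driver-level parameters read below
 * (values here are examples only), e.g.:
 *     standby_en = <1>;
 *     bypass_timeout_ms = <1000>;
 *     gyroscope_push_delay_ns = <100000>;
 */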
3865 static int nvi_of_dt_pre(struct nvi_state *st, struct device_node *dn)
3871 for (i = 0; i < ARRAY_SIZE(nvi_cfg_dflt); i++)
3872 memcpy(&st->snsr[i].cfg, &nvi_cfg_dflt[i],
3873 sizeof(st->snsr[i].cfg));
3874 st->snsr[DEV_AUX].cfg.name = "auxiliary";
3875 st->en_msk = (1 << EN_STDBY);
3876 st->bypass_timeout_ms = NVI_BYPASS_TIMEOUT_MS;
3880 /* driver specific parameters */
3881 if (!of_property_read_u32(dn, "standby_en", &tmp)) {
3883 st->en_msk |= (1 << EN_STDBY);
3885 st->en_msk &= ~(1 << EN_STDBY);
3887 of_property_read_u32(dn, "bypass_timeout_ms", &st->bypass_timeout_ms);
3888 for (i = 0; i < DEV_N_AUX; i++) {
3889 sprintf(str, "%s_push_delay_ns", st->snsr[i].cfg.name);
3890 of_property_read_u32(dn, str,
3891 (u32 *)&st->snsr[i].push_delay_ns);
3897 /* device tree parameters after HAL initialized */
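/* Illustrative DT fragment for the per-sensor overrides read below
 * (property names are built from each sensor's cfg.name; values here
 * are examples only), e.g.:
 *     accelerometer_matrix_enable = <1>;
 *     significant_motion_dmp_en = <0>;
 */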
3898 static void nvi_of_dt_post(struct nvi_state *st, struct device_node *dn)
3906 /* sensor specific parameters */
3907 for (i = 0; i < DEV_N; i++)
3908 nvs_of_dt(dn, &st->snsr[i].cfg, NULL);
3910 for (i = 0; i < DEV_N; i++) {
3912 for (j = 0; j < 9; j++)
3913 tmp |= st->snsr[i].cfg.matrix[j];
3915 /* sensor has a matrix */
3916 sprintf(str, "%s_matrix_enable", st->snsr[i].cfg.name);
3917 if (!of_property_read_u32(dn, str, &tmp)) {
3918 /* matrix override */
3920 /* apply matrix within kernel */
3921 st->snsr[i].matrix = true;
3923 /* HAL/fusion will handle matrix */
3924 st->snsr[i].matrix = false;
3929 /* sensor overrides that enable the DMP.
3930 * If the sensor is specific to the DMP and this override is
3931 * disabled, then the virtual sensor is removed.
3934 st->dmp_dev_msk = st->hal->dmp->dev_msk;
3935 st->dmp_en_msk = st->hal->dmp->en_msk;
3936 for (i = 0; i < DEV_N_AUX; i++) {
3937 sprintf(str, "%s_dmp_en",
3938 st->snsr[i].cfg.name);
3939 if (!of_property_read_u32(dn, str, &tmp)) {
3942 if (MSK_DEV_DMP & msk)
3943 st->dmp_dev_msk |= msk;
3944 st->dmp_en_msk |= msk;
3947 if (MSK_DEV_DMP & (1 << i))
3948 st->dmp_dev_msk &= msk;
3949 st->dmp_en_msk &= msk;
3956 static int nvi_init(struct nvi_state *st,
3957 const struct i2c_device_id *i2c_dev_id)
3959 struct mpu_platform_data *pdata;
3960 signed char matrix[9];
3965 nvi_of_dt_pre(st, st->i2c->dev.of_node);
3967 ret = nvi_id_dev(st, i2c_dev_id);
3971 if (st->i2c->dev.of_node) {
3972 nvi_of_dt_post(st, st->i2c->dev.of_node);
3974 pdata = dev_get_platdata(&st->i2c->dev);
3976 memcpy(&st->snsr[DEV_ACC].cfg.matrix,
3977 &pdata->orientation,
3978 sizeof(st->snsr[DEV_ACC].cfg.matrix));
3979 memcpy(&st->snsr[DEV_GYR].cfg.matrix,
3980 &pdata->orientation,
3981 sizeof(st->snsr[DEV_GYR].cfg.matrix));
3983 dev_err(&st->i2c->dev, "%s dev_get_platdata ERR\n",
3989 if (st->en_msk & (1 << FW_LOADED))
3992 ret = nvi_dmp_fw(st);
3994 /* remove DMP-dependent sensors */
3997 dev_info(&st->i2c->dev, "%s DMP FW loaded\n", __func__);
3998 /* remove DMP-dependent sensors not supported by this DMP */
3999 n = MSK_DEV_DMP ^ st->dmp_dev_msk;
4002 for (i = 0; i < DEV_N; i++) {
4004 st->snsr[i].cfg.snsr_id = -1;
4008 nvi_nvs_fn.sts = &st->sts;
4009 nvi_nvs_fn.errs = &st->errs;
4010 st->nvs = nvs_iio();
4011 if (st->nvs == NULL)
4015 for (i = 0; i < DEV_N; i++) {
4016 if (st->snsr[i].matrix) {
4017 /* matrix is applied in the kernel, so hide it from NVS */
4018 memcpy(matrix, st->snsr[i].cfg.matrix, sizeof(matrix));
4019 memset(st->snsr[i].cfg.matrix, 0,
4020 sizeof(st->snsr[i].cfg.matrix));
4022 ret = st->nvs->probe(&st->snsr[i].nvs_st, st, &st->i2c->dev,
4023 &nvi_nvs_fn, &st->snsr[i].cfg);
4025 st->snsr[i].cfg.snsr_id = i;
4026 if (st->snsr[i].matrix)
4027 memcpy(st->snsr[i].cfg.matrix, matrix,
4028 sizeof(st->snsr[i].cfg.matrix));
4029 nvi_max_range(st, i, st->snsr[i].cfg.max_range.ival);
4036 ret = request_threaded_irq(st->i2c->irq, nvi_handler, nvi_thread,
4037 IRQF_TRIGGER_RISING, NVI_NAME, st);
4039 dev_err(&st->i2c->dev, "%s req_threaded_irq ERR %d\n",
4044 nvi_pm(st, __func__, NVI_PM_AUTO);
4045 nvi_state_local = st;
4049 static void nvi_dmp_fw_load_worker(struct work_struct *work)
4051 struct nvi_pdata *pd = container_of(work, struct nvi_pdata,
4053 struct nvi_state *st = &pd->st;
4056 ret = nvi_init(st, pd->i2c_dev_id);
4058 dev_err(&st->i2c->dev, "%s ERR %d\n", __func__, ret);
4059 nvi_remove(st->i2c);
4061 dev_info(&st->i2c->dev, "%s done\n", __func__);
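/* Probe defers the heavy initialization (device ID, DMP firmware load,
 * NVS registration, IRQ request) to a work item so probe itself returns
 * quickly; nvi_dmp_fw_load_worker() above completes the init and tears
 * down via nvi_remove() on failure.
 */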
4064 static int nvi_probe(struct i2c_client *client,
4065 const struct i2c_device_id *i2c_dev_id)
4067 struct nvi_pdata *pd;
4068 struct nvi_state *st;
4071 dev_info(&client->dev, "%s %s\n", __func__, i2c_dev_id->name);
4073 dev_err(&client->dev, "%s ERR: no interrupt\n", __func__);
4077 /* just test for a device tree global disable */
4078 ret = nvs_of_dt(client->dev.of_node, NULL, NULL);
4079 if (ret == -ENODEV) {
4080 dev_info(&client->dev, "%s DT disabled\n", __func__);
4084 pd = devm_kzalloc(&client->dev, sizeof(*pd), GFP_KERNEL);
4089 i2c_set_clientdata(client, pd);
4091 pd->i2c_dev_id = i2c_dev_id;
4092 /* init the FW load work item (deferred device init) */
4093 INIT_WORK(&pd->fw_load_work, nvi_dmp_fw_load_worker);
4094 schedule_work(&pd->fw_load_work);
4098 MODULE_DEVICE_TABLE(i2c, nvi_i2c_device_id);
4100 static const struct of_device_id nvi_of_match[] = {
4101 { .compatible = "invensense,mpu6xxx", },
4102 { .compatible = "invensense,mpu6050", },
4103 { .compatible = "invensense,mpu6500", },
4104 { .compatible = "invensense,mpu6515", },
4105 { .compatible = "invensense,mpu9150", },
4106 { .compatible = "invensense,mpu9250", },
4107 { .compatible = "invensense,mpu9350", },
4108 { .compatible = "invensense,icm20628", },
4109 { .compatible = "invensense,icm20630", },
4110 { .compatible = "invensense,icm20632", },
4114 MODULE_DEVICE_TABLE(of, nvi_of_match);
4116 static struct i2c_driver nvi_i2c_driver = {
4117 .class = I2C_CLASS_HWMON,
4119 .remove = nvi_remove,
4120 .shutdown = nvi_shutdown,
4123 .owner = THIS_MODULE,
4124 .of_match_table = of_match_ptr(nvi_of_match),
4127 .id_table = nvi_i2c_device_id,
4130 module_i2c_driver(nvi_i2c_driver);
4132 MODULE_LICENSE("GPL");
4133 MODULE_DESCRIPTION("NVidia Invensense driver");
4134 MODULE_AUTHOR("NVIDIA Corporation");