1 /* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
3 * This software is licensed under the terms of the GNU General Public
4 * License version 2, as published by the Free Software Foundation, and
5 * may be copied, distributed, and modified under those terms.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
13 /* NVS = NVidia Sensor framework */
14 /* See nvs_iio.c and nvs.h for documentation */
17 #include <linux/i2c.h>
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/kernel.h>
21 #include <linux/err.h>
22 #include <linux/delay.h>
23 #include <linux/interrupt.h>
24 #include <linux/regulator/consumer.h>
26 #include <linux/nvs.h>
27 #include <linux/crc32.h>
28 #include <linux/mpu_iio.h>
32 #define NVI_DRIVER_VERSION (336)
33 #define NVI_VENDOR "Invensense"
34 #define NVI_NAME "mpu6xxx"
35 #define NVI_NAME_MPU6050 "mpu6050"
36 #define NVI_NAME_MPU6500 "mpu6500"
37 #define NVI_NAME_MPU6515 "mpu6515"
38 #define NVI_NAME_MPU9150 "mpu9150"
39 #define NVI_NAME_MPU9250 "mpu9250"
40 #define NVI_NAME_MPU9350 "mpu9350"
41 #define NVI_NAME_ICM20628 "icm20628"
42 #define NVI_NAME_ICM20630 "icm20630"
43 #define NVI_NAME_ICM20632 "icm20632"
44 #define NVI_HW_ID_AUTO (0xFF)
45 #define NVI_HW_ID_MPU6050 (0x68)
46 #define NVI_HW_ID_MPU6500 (0x70)
47 #define NVI_HW_ID_MPU6515 (0x74)
48 #define NVI_HW_ID_MPU9150 (0x68)
49 #define NVI_HW_ID_MPU9250 (0x71)
50 #define NVI_HW_ID_MPU9350 (0x72)
51 #define NVI_HW_ID_ICM20628 (0xA2)
52 #define NVI_HW_ID_ICM20630 (0xAB)
53 #define NVI_HW_ID_ICM20632 (0xAD)
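/* The NVI_HW_ID_* values are presumably the chip ID (WHO_AM_I) register
 * contents expected from each part; NVI_HW_ID_AUTO requests runtime
 * detection of whichever supported part is found on the bus.
 */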
54 /* NVI_FW_CRC_CHECK used only during development to confirm valid FW */
55 #define NVI_FW_CRC_CHECK (0)
59 struct work_struct fw_load_work;
60 const struct i2c_device_id *i2c_dev_id;
66 const struct nvi_hal *hal;
68 /* ARRAY_SIZE(nvi_id_hals) must match ARRAY_SIZE(nvi_i2c_device_id) - 1 */
82 /* enum NVI_NDX_N must match ARRAY_SIZE(nvi_i2c_device_id) - 1 */
83 static struct i2c_device_id nvi_i2c_device_id[] = {
84 { NVI_NAME, NVI_NDX_AUTO },
85 { NVI_NAME_MPU6050, NVI_NDX_MPU6050 },
86 { NVI_NAME_MPU6500, NVI_NDX_MPU6500 },
87 { NVI_NAME_MPU6515, NVI_NDX_MPU6515 },
88 { NVI_NAME_MPU9150, NVI_NDX_MPU9150 },
89 { NVI_NAME_MPU9250, NVI_NDX_MPU9250 },
90 { NVI_NAME_MPU9350, NVI_NDX_MPU9350 },
91 { NVI_NAME_ICM20628, NVI_NDX_ICM20628 },
92 { NVI_NAME_ICM20630, NVI_NDX_ICM20630 },
93 { NVI_NAME_ICM20632, NVI_NDX_ICM20632 },
105 NVI_INFO_REG_WR = 0xC6, /* use 0xD0 on cmd line */
113 /* regulator names in order of powering on */
114 static char *nvi_vregs[] = {
119 static struct nvi_state *nvi_state_local;
122 static int nvi_dmp_fw(struct nvi_state *st);
123 static int nvi_aux_bypass_enable(struct nvi_state *st, bool enable);
124 static int nvi_read(struct nvi_state *st, bool flush);
126 static int nvi_nb_vreg(struct nvi_state *st,
127 unsigned long event, unsigned int i)
129 if (event & REGULATOR_EVENT_POST_ENABLE)
130 st->ts_vreg_en[i] = nvs_timestamp();
131 else if (event & (REGULATOR_EVENT_DISABLE |
132 REGULATOR_EVENT_FORCE_DISABLE))
133 st->ts_vreg_en[i] = 0;
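/* ts_vreg_en[] holds the time each supply came up (0 = unknown/off);
 * nvi_pm_w() uses it to wait out any remaining power-on-reset time
 * before the device is accessed.
 */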
134 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
135 dev_info(&st->i2c->dev, "%s %s event=0x%x ts=%lld\n",
136 __func__, st->vreg[i].supply, (unsigned int)event,
141 static int nvi_nb_vreg_vdd(struct notifier_block *nb,
142 unsigned long event, void *ignored)
144 struct nvi_state *st = container_of(nb, struct nvi_state, nb_vreg[0]);
146 return nvi_nb_vreg(st, event, 0);
149 static int nvi_nb_vreg_vlogic(struct notifier_block *nb,
150 unsigned long event, void *ignored)
152 struct nvi_state *st = container_of(nb, struct nvi_state, nb_vreg[1]);
154 return nvi_nb_vreg(st, event, 1);
157 static int (* const nvi_nb_vreg_pf[])(struct notifier_block *nb,
158 unsigned long event, void *ignored) = {
163 void nvi_err(struct nvi_state *st)
170 static void nvi_mutex_lock(struct nvi_state *st)
175 for (i = 0; i < DEV_N; i++)
176 st->nvs->nvs_mutex_lock(st->snsr[i].nvs_st);
180 static void nvi_mutex_unlock(struct nvi_state *st)
185 for (i = 0; i < DEV_N; i++)
186 st->nvs->nvs_mutex_unlock(st->snsr[i].nvs_st);
190 static void nvi_disable_irq(struct nvi_state *st)
192 if (st->i2c->irq && !st->irq_dis) {
193 disable_irq_nosync(st->i2c->irq);
195 if (st->sts & NVS_STS_SPEW_MSG)
196 dev_info(&st->i2c->dev, "%s IRQ disabled\n", __func__);
200 static void nvi_enable_irq(struct nvi_state *st)
202 if (st->i2c->irq && st->irq_dis) {
203 enable_irq(st->i2c->irq);
205 if (st->sts & NVS_STS_SPEW_MSG)
206 dev_info(&st->i2c->dev, "%s IRQ enabled\n", __func__);
210 static void nvi_rc_clr(struct nvi_state *st, const char *fn)
214 for (i = 0; i < ARRAY_SIZE(st->rc_msk); i++)
216 if (st->sts & NVI_DBG_SPEW_MSG)
217 dev_info(&st->i2c->dev, "%s-%s\n", __func__, fn);
220 static int nvi_i2c_w(struct nvi_state *st, u16 len, u8 *buf)
224 msg.addr = st->i2c->addr;
228 if (i2c_transfer(st->i2c->adapter, &msg, 1) != 1) {
236 static int nvi_wr_reg_bank_sel(struct nvi_state *st, u8 reg_bank)
242 if (!st->hal->reg->reg_bank.reg)
246 if (st->rc_msk[NVI_RC_BANK_REG_BANK] & NVI_RC_MSK_REG_BANK) {
247 if (reg_bank == st->rc.reg_bank)
251 buf[0] = st->hal->reg->reg_bank.reg;
253 ret = nvi_i2c_w(st, sizeof(buf), buf);
255 dev_err(&st->i2c->dev, "%s 0x%x!->0x%x ERR=%d\n",
256 __func__, st->rc.reg_bank, reg_bank, ret);
257 st->rc_msk[NVI_RC_BANK_REG_BANK] &=
258 ~NVI_RC_MSK_REG_BANK;
260 if (st->sts & NVI_DBG_SPEW_MSG)
261 dev_info(&st->i2c->dev, "%s 0x%x->0x%x\n",
262 __func__, st->rc.reg_bank, reg_bank);
263 st->rc.reg_bank = reg_bank;
264 st->rc_msk[NVI_RC_BANK_REG_BANK] |=
271 static int nvi_i2c_write(struct nvi_state *st, u8 bank, u16 len, u8 *buf)
275 ret = nvi_wr_reg_bank_sel(st, bank);
277 ret = nvi_i2c_w(st, len, buf);
281 static int nvi_i2c_write_be(struct nvi_state *st, const struct nvi_br *br,
288 for (i = len; i > 0; i--)
289 buf[i] = (u8)(val >> (8 * (len - i)));
290 return nvi_i2c_write(st, br->bank, len + 1, buf);
293 static int nvi_i2c_write_le(struct nvi_state *st, const struct nvi_br *br,
300 for (i = 0; i < len; i++)
301 buf[i + 1] = (u8)(val >> (8 * i));
302 return nvi_i2c_write(st, br->bank, len + 1, buf);
305 int nvi_i2c_write_rc(struct nvi_state *st, const struct nvi_br *br, u32 val,
306 const char *fn, u8 *rc, bool be)
311 unsigned int rc_bank;
319 rc_bank <<= 7; /* registers only go to 0x7F */
321 rc_msk = ((1 << len) - 1) << (rc_bank % 64);
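/* Each bit of rc_msk marks one byte of this register as valid in the
 * register cache (st->rc): the write below is skipped when the cached
 * bytes already match the requested value, and the mask is cleared on an
 * I2C error so the next write is forced out to the hardware.
 */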
327 if ((st->rc_msk[rc_bank] & rc_msk) == rc_msk) {
328 /* register is cached */
329 for (i = 0; i < len; i++) {
331 (u8)(val >> (8 * i))) {
332 /* register data changed */
338 /* register not cached */
347 ret = nvi_i2c_write_be(st, br, len, val);
349 ret = nvi_i2c_write_le(st, br, len, val);
353 dev_err(&st->i2c->dev,
354 "%s 0x%08x!=>0x%01x%02x ERR=%d\n",
355 fn, val, br->bank, br->reg, ret);
356 st->rc_msk[rc_bank] &= ~rc_msk;
358 if (st->sts & NVI_DBG_SPEW_MSG && fn)
359 dev_info(&st->i2c->dev,
360 "%s 0x%08x=>0x%01x%02x\n",
361 fn, val, br->bank, br->reg);
363 for (i = 0; i < len; i++)
364 *(rc + i) = (u8)(val >> (8 * i));
365 st->rc_msk[rc_bank] |= rc_msk;
367 /* register data not cached */
368 st->rc_msk[rc_bank] &= ~rc_msk;
375 int nvi_i2c_wr(struct nvi_state *st, const struct nvi_br *br,
376 u8 val, const char *fn)
382 buf[1] = val | br->dflt;
383 ret = nvi_wr_reg_bank_sel(st, br->bank);
385 ret = nvi_i2c_w(st, sizeof(buf), buf);
389 dev_err(&st->i2c->dev,
390 "%s 0x%02x!=>0x%01x%02x ERR=%d\n",
391 fn, val, br->bank, br->reg, ret);
393 if (st->sts & NVI_DBG_SPEW_MSG && fn)
394 dev_info(&st->i2c->dev,
395 "%s 0x%02x=>0x%01x%02x\n",
396 fn, val, br->bank, br->reg);
402 int nvi_i2c_wr_rc(struct nvi_state *st, const struct nvi_br *br,
403 u8 val, const char *fn, u8 *rc)
407 unsigned int rc_bank;
412 rc_bank <<= 7; /* registers only go to 0x7F */
414 rc_msk = 1 << (rc_bank % 64);
419 if (st->rc_msk[rc_bank] & rc_msk) {
420 /* register is cached */
422 /* register data changed */
425 /* register not cached */
433 ret = nvi_i2c_wr(st, br, val, fn);
435 st->rc_msk[rc_bank] &= ~rc_msk;
439 st->rc_msk[rc_bank] |= rc_msk;
441 st->rc_msk[rc_bank] &= ~rc_msk;
448 int nvi_i2c_r(struct nvi_state *st, u8 bank, u8 reg, u16 len, u8 *buf)
450 struct i2c_msg msg[2];
453 ret = nvi_wr_reg_bank_sel(st, bank);
459 msg[0].addr = st->i2c->addr;
463 msg[1].addr = st->i2c->addr;
464 msg[1].flags = I2C_M_RD;
467 if (i2c_transfer(st->i2c->adapter, msg, 2) != 2) {
475 int nvi_i2c_rd(struct nvi_state *st, const struct nvi_br *br, u8 *buf)
481 return nvi_i2c_r(st, br->bank, br->reg, len, buf);
484 int nvi_mem_wr(struct nvi_state *st, u16 addr, u16 len, u8 *data,
487 struct i2c_msg msg[6];
496 ret = nvi_wr_reg_bank_sel(st, st->hal->reg->mem_bank.bank);
500 buf_bank[0] = st->hal->reg->mem_bank.reg;
501 buf_bank[1] = addr >> 8;
502 buf_addr[0] = st->hal->reg->mem_addr.reg;
503 buf_addr[1] = addr & 0xFF;
504 buf_data[0] = st->hal->reg->mem_rw.reg;
505 msg[0].addr = st->i2c->addr;
507 msg[0].len = sizeof(buf_bank);
508 msg[0].buf = buf_bank;
509 msg[1].addr = st->i2c->addr;
511 msg[1].len = sizeof(buf_addr);
512 msg[1].buf = buf_addr;
513 msg[2].addr = st->i2c->addr;
515 msg[2].buf = buf_data;
516 msg[3].addr = st->i2c->addr;
518 msg[3].len = sizeof(buf_addr);
519 msg[3].buf = buf_addr;
520 msg[4].addr = st->i2c->addr;
523 msg[4].buf = buf_data;
524 msg[5].addr = st->i2c->addr;
525 msg[5].flags = I2C_M_RD;
526 msg[5].buf = &buf_data[1];
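/* msg[0]-msg[2] select the memory bank, set the start address and write the
 * data block; msg[3]-msg[5] re-address the same block and read it back for
 * the verification memcmp() below.
 */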
528 bank_len = (addr + len - 1) >> 8;
529 for (; buf_bank[1] <= bank_len; buf_bank[1]++) {
530 if (buf_bank[1] == bank_len)
531 data_len = len - data_i;
533 data_len = 0x0100 - buf_addr[1];
534 msg[2].len = data_len + 1;
535 memcpy(&buf_data[1], data + data_i, data_len);
536 if (i2c_transfer(st->i2c->adapter, msg, 3) != 3) {
542 msg[5].len = data_len;
543 if (i2c_transfer(st->i2c->adapter, &msg[3], 3) != 3) {
548 ret = memcmp(&buf_data[1], data + data_i, data_len);
560 int nvi_mem_wr_be(struct nvi_state *st, u16 addr, u16 len, u32 val)
566 for (i = 0; i < len; i++)
567 buf[i] = (u8)(val >> (8 * (len - (i + 1))));
568 ret = nvi_mem_wr(st, addr, len, buf, false);
569 if (st->sts & NVI_DBG_SPEW_MSG)
570 dev_info(&st->i2c->dev, "%s 0x%08x=>0x%04hx err=%d\n",
571 __func__, val, addr, ret);
575 int nvi_mem_wr_be_mc(struct nvi_state *st, u16 addr, u16 len, u32 val, u32 *mc)
579 if (val != *mc || st->mc_dis) {
580 ret = nvi_mem_wr_be(st, addr, len, val);
587 int nvi_mem_rd(struct nvi_state *st, u16 addr, u16 len, u8 *data)
589 struct i2c_msg msg[4];
597 ret = nvi_wr_reg_bank_sel(st, st->hal->reg->mem_bank.bank);
601 buf_bank[0] = st->hal->reg->mem_bank.reg;
602 buf_bank[1] = addr >> 8;
603 buf_addr[0] = st->hal->reg->mem_addr.reg;
604 buf_addr[1] = addr & 0xFF;
605 msg[0].addr = st->i2c->addr;
607 msg[0].len = sizeof(buf_bank);
608 msg[0].buf = buf_bank;
609 msg[1].addr = st->i2c->addr;
611 msg[1].len = sizeof(buf_addr);
612 msg[1].buf = buf_addr;
613 msg[2].addr = st->i2c->addr;
616 msg[2].buf = (u8 *)&st->hal->reg->mem_rw.reg;
617 msg[3].addr = st->i2c->addr;
618 msg[3].flags = I2C_M_RD;
620 bank_len = (addr + len - 1) >> 8;
621 for (; buf_bank[1] <= bank_len; buf_bank[1]++) {
622 if (buf_bank[1] == bank_len)
623 data_len = len - data_i;
625 data_len = 0x0100 - buf_addr[1];
626 msg[3].len = data_len;
627 msg[3].buf = data + data_i;
628 if (i2c_transfer(st->i2c->adapter, msg, 4) != 4) {
640 int nvi_mem_rd_le(struct nvi_state *st, u16 addr, u16 len, u32 *val)
647 ret = nvi_mem_rd(st, addr, len, buf_rd);
649 /* convert to little endian */
650 for (i = 0; i < len; i++) {
661 static int nvi_rd_accel_offset(struct nvi_state *st)
667 for (i = 0; i < AXIS_N; i++) {
668 ret = nvi_i2c_rd(st, &st->hal->reg->a_offset_h[i], buf);
670 st->rc.accel_offset[i] = be16_to_cpup((__be16 *)buf);
675 int nvi_wr_accel_offset(struct nvi_state *st, unsigned int axis, u16 offset)
677 return nvi_i2c_write_rc(st, &st->hal->reg->a_offset_h[axis], offset,
678 __func__, (u8 *)&st->rc.accel_offset[axis], true);
681 static int nvi_rd_gyro_offset(struct nvi_state *st)
687 for (i = 0; i < AXIS_N; i++) {
688 ret = nvi_i2c_rd(st, &st->hal->reg->g_offset_h[i], buf);
690 st->rc.gyro_offset[i] = be16_to_cpup((__be16 *)buf);
695 int nvi_wr_gyro_offset(struct nvi_state *st, unsigned int axis, u16 offset)
697 return nvi_i2c_write_rc(st, &st->hal->reg->g_offset_h[axis], offset,
698 __func__, (u8 *)&st->rc.gyro_offset[axis], true);
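/* nvi_wr_fifo_cfg: selects the FIFO configuration on parts with multiple
 * FIFOs.  Bit 0 of fifo_cfg enables the multi-FIFO scheme and bits [4:2]
 * carry the FIFO index (see nvi_int_able() and nvi_fifo_dev()); a negative
 * fifo argument presumably disables the multi-FIFO scheme.
 */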
701 int nvi_wr_fifo_cfg(struct nvi_state *st, int fifo)
705 if (!st->hal->reg->fifo_cfg.reg)
709 fifo_cfg = (fifo << 2) | 0x01;
712 return nvi_i2c_wr_rc(st, &st->hal->reg->fifo_cfg, fifo_cfg,
713 NULL, &st->rc.fifo_cfg);
716 static int nvi_wr_i2c_slv4_ctrl(struct nvi_state *st, bool slv4_en)
720 val = st->aux.delay_hw;
721 val |= (st->aux.port[AUX_PORT_IO].nmp.ctrl & BIT_I2C_SLV_REG_DIS);
724 return nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv4_ctrl, val,
725 __func__, &st->rc.i2c_slv4_ctrl);
728 static int nvi_rd_int_sts_dmp(struct nvi_state *st)
732 ret = nvi_i2c_rd(st, &st->hal->reg->int_dmp, &st->rc.int_dmp);
734 dev_err(&st->i2c->dev, "%s %x=ERR %d\n",
735 __func__, st->hal->reg->int_dmp.reg, ret);
739 static int nvi_rd_int_status(struct nvi_state *st)
741 u8 buf[4] = {0, 0, 0, 0};
746 ret = nvi_i2c_rd(st, &st->hal->reg->int_status, buf);
748 dev_err(&st->i2c->dev, "%s %x=ERR %d\n",
749 __func__, st->hal->reg->int_status.reg, ret);
751 /* convert to little endian */
752 st->rc.int_status = 0;
753 n = st->hal->reg->int_status.len;
756 for (i = 0; i < n; i++) {
757 st->rc.int_status <<= 8;
758 st->rc.int_status |= buf[i];
761 if (st->rc.int_status & (1 << st->hal->bit->int_dmp))
762 ret = nvi_rd_int_sts_dmp(st);
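/* nvi_int_able: programs the interrupt enables.  When the DMP owns the
 * devices only the DMP interrupt is enabled; otherwise a data-ready
 * interrupt is enabled per in-use FIFO (or just the single data-ready bit
 * when multi-FIFO mode is off).
 */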
768 int nvi_int_able(struct nvi_state *st, const char *fn, bool en)
777 if (st->en_msk & (1 << DEV_DMP)) {
778 int_en |= 1 << st->hal->bit->int_dmp;
779 } else if (st->en_msk & MSK_DEV_ALL) {
780 int_msk = 1 << st->hal->bit->int_data_rdy_0;
781 if (st->rc.fifo_cfg & 0x01) {
782 /* multi FIFO enabled */
784 for (; fifo < st->hal->fifo_n; fifo++) {
785 dev = st->hal->fifo_dev[fifo];
789 if (st->rc.fifo_en & st->hal->
790 dev[dev]->fifo_en_msk)
791 int_en |= int_msk << fifo;
798 ret = nvi_i2c_write_rc(st, &st->hal->reg->int_enable, int_en,
799 __func__, (u8 *)&st->rc.int_enable, false);
800 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
801 dev_info(&st->i2c->dev, "%s-%s en=%x int_en=%x err=%d\n",
802 __func__, fn, en, int_en, ret);
806 static void nvi_flush_aux(struct nvi_state *st, int port)
808 struct aux_port *ap = &st->aux.port[port];
811 ap->nmp.handler(NULL, 0, 0, ap->nmp.ext_driver);
814 static void nvi_flush_push(struct nvi_state *st)
820 for (i = 0; i < DEV_N; i++) {
821 if (st->snsr[i].flush) {
822 ret = st->nvs->handler(st->snsr[i].nvs_st, NULL, 0LL);
824 st->snsr[i].flush = false;
827 for (i = 0; i < AUX_PORT_IO; i++) {
828 ap = &st->aux.port[i];
830 nvi_flush_aux(st, i);
835 static int nvi_user_ctrl_rst(struct nvi_state *st, u8 user_ctrl)
844 if (user_ctrl & BIT_SIG_COND_RST)
845 user_ctrl = BITS_USER_CTRL_RST;
846 if (user_ctrl & BIT_DMP_RST)
847 user_ctrl |= BIT_FIFO_RST;
848 if (user_ctrl & BIT_FIFO_RST) {
850 if (st->hal->reg->fifo_rst.reg) {
852 if (st->en_msk & (1 << DEV_DMP)) {
853 ret = nvi_wr_fifo_cfg(st,
854 st->hal->dmp->fifo_mode);
857 for (i = 0; i < DEV_AXIS_N; i++) {
858 if (st->hal->dev[i]->fifo_en_msk &&
863 msk = st->snsr[DEV_AUX].enable;
864 msk |= st->aux.dmp_en_msk;
865 if (st->hal->dev[DEV_AUX]->fifo_en_msk && msk)
868 ret = nvi_wr_fifo_cfg(st, 0);
870 ret = nvi_wr_fifo_cfg(st, -1);
872 if (st->icm_fifo_off) {
873 ret |= nvi_i2c_wr(st, &st->hal->reg->fifo_rst,
875 ret |= nvi_i2c_wr(st, &st->hal->reg->fifo_rst,
877 st->icm_fifo_off = false;
879 if (st->en_msk & (1 << DEV_DMP))
883 ret |= nvi_i2c_wr(st, &st->hal->reg->fifo_rst,
885 ret |= nvi_i2c_wr(st, &st->hal->reg->fifo_rst,
891 if (user_ctrl == BIT_FIFO_RST)
895 user_ctrl &= ~BIT_FIFO_RST;
899 ret = nvi_i2c_wr(st, &st->hal->reg->user_ctrl, user_ctrl, __func__);
903 if (user_ctrl & BIT_FIFO_RST)
905 for (i = 0; i < POWER_UP_TIME; i++) {
907 ret = nvi_i2c_rd(st, &st->hal->reg->user_ctrl,
909 if (!(user_ctrl & BITS_USER_CTRL_RST))
915 st->rc.user_ctrl = user_ctrl;
916 if (user_ctrl & BIT_DMP_RST && st->hal->dmp) {
917 if (st->hal->dmp->dmp_reset_delay_ms)
918 msleep(st->hal->dmp->dmp_reset_delay_ms);
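/* nvi_user_ctrl_en: programs FIFO_EN and the DMP/FIFO/I2C-master enable
 * bits in USER_CTRL (and optionally the interrupt enables) according to the
 * currently enabled sensors and aux ports.
 */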
925 int nvi_user_ctrl_en(struct nvi_state *st, const char *fn,
926 bool en_dmp, bool en_fifo, bool en_i2c, bool en_irq)
934 if (!(st->en_msk & (1 << DEV_DMP)))
937 if (en_fifo && !en_dmp) {
938 for (i = 0; i < st->hal->src_n; i++)
939 st->src[i].fifo_data_n = 0;
941 for (i = 0; i < DEV_MPU_N; i++) {
942 if (st->snsr[i].enable &&
943 st->hal->dev[i]->fifo_en_msk) {
944 val |= st->hal->dev[i]->fifo_en_msk;
945 st->src[st->hal->dev[i]->src].fifo_data_n +=
946 st->hal->dev[i]->fifo_data_n;
947 st->fifo_src = st->hal->dev[i]->src;
951 if (st->hal->dev[DEV_AUX]->fifo_en_msk &&
952 st->snsr[DEV_AUX].enable) {
953 st->src[st->hal->dev[DEV_AUX]->src].fifo_data_n +=
955 st->fifo_src = st->hal->dev[DEV_AUX]->src;
956 for (i = 0; i < AUX_PORT_IO; i++) {
957 ap = &st->aux.port[i];
958 if (st->snsr[DEV_AUX].enable & (1 << i) &&
959 ap->nmp.addr & BIT_I2C_READ)
961 st->hal->bit->slv_fifo_en[i]);
968 ret |= nvi_i2c_write_rc(st, &st->hal->reg->fifo_en, val,
969 __func__, (u8 *)&st->rc.fifo_en, false);
976 if (en_i2c && (st->en_msk & (1 << DEV_AUX)))
977 val |= BIT_I2C_MST_EN;
981 ret = nvi_int_able(st, __func__, true);
984 ret |= nvi_i2c_wr_rc(st, &st->hal->reg->user_ctrl, val,
985 __func__, &st->rc.user_ctrl);
987 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
988 dev_info(&st->i2c->dev,
989 "%s-%s DMP=%x FIFO=%x I2C=%x IRQ=%x err=%d\n",
990 __func__, fn, en_dmp, en_fifo, en_i2c, en_irq, ret);
994 int nvi_wr_pm1(struct nvi_state *st, const char *fn, u8 pm1)
1000 if (pm1 & BIT_H_RESET) {
1001 /* must make sure FIFO is off or IRQ storm will occur */
1002 ret = nvi_int_able(st, __func__, false);
1003 ret |= nvi_user_ctrl_en(st, __func__,
1004 false, false, false, false);
1006 nvi_user_ctrl_rst(st, BITS_USER_CTRL_RST);
1007 ret = nvi_i2c_wr(st, &st->hal->reg->pm1,
1008 BIT_H_RESET, __func__);
1011 ret = nvi_i2c_wr_rc(st, &st->hal->reg->pm1, pm1,
1012 __func__, &st->rc.pm1);
1014 st->pm = NVI_PM_ERR;
1015 if (pm1 & BIT_H_RESET && !ret) {
1016 st->en_msk &= MSK_RST;
1017 nvi_rc_clr(st, __func__);
1019 for (i = 0; i < st->hal->src_n; i++)
1020 st->src[i].period_us_req = 0;
1022 for (i = 0; i < (POWER_UP_TIME / REG_UP_TIME); i++) {
1023 mdelay(REG_UP_TIME);
1025 ret = nvi_i2c_rd(st, &st->hal->reg->pm1, &pm1_rd);
1026 if ((!ret) && (!(pm1_rd & BIT_H_RESET)))
1031 nvi_rd_accel_offset(st);
1032 nvi_rd_gyro_offset(st);
1035 if (st->sts & NVI_DBG_SPEW_MSG)
1036 dev_info(&st->i2c->dev, "%s-%s pm1=%x err=%d\n",
1037 __func__, fn, pm1, ret);
1041 static int nvi_pm_w(struct nvi_state *st, u8 pm1, u8 pm2, u8 lp)
1044 unsigned int delay_ms;
1048 ret = nvs_vregs_enable(&st->i2c->dev, st->vreg, ARRAY_SIZE(nvi_vregs));
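/* If a supply only recently came up, wait out the remainder of the POR_MS
 * power-on-reset window before touching the device.  Note that delay_ms
 * accumulates nanoseconds here and is only converted to milliseconds by the
 * divide below.
 */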
1051 for (i = 0; i < ARRAY_SIZE(nvi_vregs); i++) {
1052 por_ns = nvs_timestamp() - st->ts_vreg_en[i];
1053 if ((por_ns < 0) || (!st->ts_vreg_en[i])) {
1054 delay_ms = (POR_MS * 1000000);
1058 if (por_ns < (POR_MS * 1000000)) {
1059 por_ns = (POR_MS * 1000000) - por_ns;
1060 if (por_ns > delay_ms)
1061 delay_ms = (unsigned int)por_ns;
1064 delay_ms /= 1000000;
1065 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1066 dev_info(&st->i2c->dev, "%s %ums delay\n",
1067 __func__, delay_ms);
1070 ret = nvi_wr_pm1(st, __func__, BIT_H_RESET);
1072 ret |= st->hal->fn->pm(st, pm1, pm2, lp);
1076 int nvi_pm_wr(struct nvi_state *st, const char *fn, u8 pm1, u8 pm2, u8 lp)
1080 ret = nvi_pm_w(st, pm1, pm2, lp);
1081 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1082 dev_info(&st->i2c->dev, "%s-%s PM1=%x PM2=%x LPA=%x err=%d\n",
1083 __func__, fn, pm1, pm2, lp, ret);
1084 st->pm = NVI_PM_ERR; /* lost st->pm status: nvi_pm is being bypassed */
1090 * @param pm_req: call with one of the following:
1091 * NVI_PM_OFF_FORCE = force off state
1092 * NVI_PM_ON = minimum power for device access
1093 * NVI_PM_ON_FULL = power for gyro
1094 * NVI_PM_AUTO = automatically sets power after
1096 * Typical use is to set needed power for configuration and
1097 * then call with NVI_PM_AUTO when done. All other NVI_PM_
1098 * levels are handled automatically and are for internal
1100 * @return int: returns 0 for success or error code
1102 static int nvi_pm(struct nvi_state *st, const char *fn, int pm_req)
1111 lp = st->rc.lp_config;
1112 if (pm_req == NVI_PM_AUTO) {
1114 if (!(st->en_msk & MSK_PM_ACC_EN))
1115 pm2 |= BIT_PWR_ACCEL_STBY;
1116 if (!st->snsr[DEV_GYR].enable)
1117 pm2 |= BIT_PWR_GYRO_STBY;
1118 if (st->en_msk & MSK_PM_ON_FULL) {
1119 pm = NVI_PM_ON_FULL;
1120 } else if (st->en_msk & MSK_PM_ON) {
1122 } else if ((st->en_msk & ((1 << EN_LP) |
1123 MSK_DEV_ALL)) == MSK_PM_LP) {
1124 if (st->snsr[DEV_ACC].period_us >=
1125 st->snsr[DEV_ACC].cfg.thresh_hi) {
1126 for (lp = 0; lp < st->hal->lp_tbl_n; lp++) {
1127 if (st->snsr[DEV_ACC].period_us >=
1128 st->hal->lp_tbl[lp])
1131 pm = NVI_PM_ON_CYCLE;
1135 } else if (st->en_msk & MSK_PM_LP) {
1137 } else if (st->en_msk & MSK_PM_STDBY || st->aux.bypass_lock) {
1144 if ((pm_req > NVI_PM_STDBY) && (pm_req < st->pm))
1149 if (pm == NVI_PM_OFF) {
1150 for (i = 0; i < AUX_PORT_IO; i++) {
1151 if (st->aux.port[i].nmp.shutdown_bypass) {
1152 nvi_aux_bypass_enable(st, true);
1157 if (st->en_msk & (1 << FW_LOADED))
1162 case NVI_PM_OFF_FORCE:
1167 pm2 = (BIT_PWR_ACCEL_STBY | BIT_PWR_GYRO_STBY);
1170 case NVI_PM_ON_CYCLE:
1172 pm2 &= ~BIT_PWR_ACCEL_STBY;
1176 pm1 = INV_CLK_INTERNAL;
1177 if (pm2 & BIT_PWR_ACCEL_STBY) {
1178 for (i = 0; i < DEV_N_AUX; i++) {
1179 if (MSK_PM_ACC_EN & (1 << i)) {
1180 if (st->snsr[i].enable) {
1181 pm2 &= ~BIT_PWR_ACCEL_STBY;
1190 case NVI_PM_ON_FULL:
1192 /* gyro must be turned on before going to PLL clock */
1193 pm2 &= ~BIT_PWR_GYRO_STBY;
1197 dev_err(&st->i2c->dev, "%s %d=>%d ERR=EINVAL\n",
1198 __func__, st->pm, pm);
1202 if (pm != st->pm || lp != st->rc.lp_config || pm2 != (st->rc.pm2 &
1203 (BIT_PWR_ACCEL_STBY | BIT_PWR_GYRO_STBY))) {
1204 if (pm == NVI_PM_OFF) {
1205 if (st->pm > NVI_PM_OFF || st->pm == NVI_PM_ERR)
1206 ret |= nvi_wr_pm1(st, __func__, BIT_H_RESET);
1207 ret |= nvi_pm_w(st, pm1, pm2, lp);
1208 ret |= nvs_vregs_disable(&st->i2c->dev, st->vreg,
1209 ARRAY_SIZE(nvi_vregs));
1211 if (pm == NVI_PM_ON_CYCLE)
1212 /* last chance to write to regs before cycle */
1213 ret |= nvi_int_able(st, __func__, true);
1214 ret |= nvi_pm_w(st, pm1, pm2, lp);
1215 if (pm > NVI_PM_STDBY)
1216 mdelay(REG_UP_TIME);
1219 dev_err(&st->i2c->dev, "%s PM %d=>%d ERR=%d\n",
1220 __func__, st->pm, pm, ret);
1223 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1224 dev_info(&st->i2c->dev,
1225 "%s-%s PM %d=>%d PM1=%x PM2=%x LP=%x\n",
1226 __func__, fn, st->pm, pm, pm1, pm2, lp);
1234 static void nvi_pm_exit(struct nvi_state *st)
1237 nvi_pm(st, __func__, NVI_PM_OFF_FORCE);
1238 nvs_vregs_exit(&st->i2c->dev, st->vreg, ARRAY_SIZE(nvi_vregs));
1241 static int nvi_pm_init(struct nvi_state *st)
1245 ret = nvs_vregs_init(&st->i2c->dev,
1246 st->vreg, ARRAY_SIZE(nvi_vregs), nvi_vregs);
1247 st->pm = NVI_PM_ERR;
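/* nvi_dmp_fw: loads the DMP firmware image into the device's program
 * memory (optionally CRC-checking it first when NVI_FW_CRC_CHECK is set),
 * writes the firmware start address, and runs the DMP-specific init hook.
 */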
1251 static int nvi_dmp_fw(struct nvi_state *st)
1253 #if NVI_FW_CRC_CHECK
1255 #endif /* NVI_FW_CRC_CHECK */
1258 st->icm_dmp_war = false;
1262 #if NVI_FW_CRC_CHECK
1263 crc32 = crc32(0, st->hal->dmp->fw, st->hal->dmp->fw_len);
1264 if (crc32 != st->hal->dmp->fw_crc32) {
1265 dev_err(&st->i2c->dev, "%s FW CRC FAIL %x != %x\n",
1266 __func__, crc32, st->hal->dmp->fw_crc32);
1269 #endif /* NVI_FW_CRC_CHECK */
1271 ret = nvi_user_ctrl_en(st, __func__, false, false, false, false);
1275 ret = nvi_mem_wr(st, st->hal->dmp->fw_mem_addr,
1276 st->hal->dmp->fw_len,
1277 (u8 *)st->hal->dmp->fw, true);
1279 dev_err(&st->i2c->dev, "%s ERR: nvi_mem_wr\n", __func__);
1283 ret = nvi_i2c_write_rc(st, &st->hal->reg->fw_start,
1284 st->hal->dmp->fw_start,
1285 __func__, NULL, true);
1289 ret = st->hal->dmp->fn_init(st); /* nvi_dmp_init */
1291 dev_err(&st->i2c->dev, "%s ERR: nvi_dmp_init\n", __func__);
1295 nvi_user_ctrl_en(st, __func__, false, false, false, false);
1296 st->en_msk |= (1 << FW_LOADED);
1300 void nvi_push_delay(struct nvi_state *st)
1304 for (i = 0; i < DEV_MPU_N; i++) {
1305 if (st->snsr[i].enable) {
1306 if (st->snsr[i].push_delay_ns &&
1307 !st->snsr[i].ts_push_delay)
1308 st->snsr[i].ts_push_delay = nvs_timestamp() +
1309 st->snsr[i].push_delay_ns;
1311 st->snsr[i].ts_push_delay = 0;
1316 int nvi_aux_delay(struct nvi_state *st, const char *fn)
1319 unsigned int msk_en;
1320 unsigned int src_us;
1325 /* determine the longest delay (ms) requested by the enabled ports */
1327 msk_en = st->snsr[DEV_AUX].enable | st->aux.dmp_en_msk;
1328 for (i = 0; msk_en; i++) {
1329 if (msk_en & (1 << i)) {
1330 msk_en &= ~(1 << i);
1331 if (delay < st->aux.port[i].nmp.delay_ms)
1332 delay = st->aux.port[i].nmp.delay_ms;
1335 src_us = st->src[st->hal->dev[DEV_AUX]->src].period_us_src;
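/* The longest requested port delay (ms) appears to be converted below into
 * a count of aux source sample periods, which becomes the HW I2C master
 * delay written via nvi_wr_i2c_slv4_ctrl().
 */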
1337 delay *= 1000; /* ms => us */
1338 if (delay % src_us) {
1348 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1349 dev_info(&st->i2c->dev, "%s-%s aux.delay_hw=%u=>%u\n",
1350 __func__, fn, st->aux.delay_hw, delay);
1351 st->aux.delay_hw = delay;
1352 ret = nvi_wr_i2c_slv4_ctrl(st, (bool)
1353 (st->rc.i2c_slv4_ctrl & BIT_SLV_EN));
1354 /* HW port delay enable */
1355 val = BIT_DELAY_ES_SHADOW;
1356 for (i = 0; i < AUX_PORT_MAX; i++) {
1357 if (st->aux.port[i].nmp.delay_ms)
1360 ret |= nvi_i2c_wr_rc(st, &st->hal->reg->i2c_mst_delay_ctrl, val,
1361 __func__, &st->rc.i2c_mst_delay_ctrl);
1365 static int nvi_timeout(struct nvi_state *st)
1367 bool disabled = true;
1368 unsigned int timeout_us = -1;
1371 /* find the fastest batch timeout of all the enabled devices */
1372 for (i = 0; i < DEV_N_AUX; i++) {
1373 if (st->snsr[i].enable) {
1374 if (st->snsr[i].timeout_us < timeout_us)
1375 timeout_us = st->snsr[i].timeout_us;
1380 disabled = true; /* batch mode is currently disabled */
1382 timeout_us = 0; /* batch mode disabled */
1383 if (timeout_us != st->bm_timeout_us) {
1384 st->bm_timeout_us = timeout_us;
1391 static int nvi_period_src(struct nvi_state *st, int src)
1393 bool enabled = false;
1394 unsigned int period_us = -1;
1395 unsigned int dev_msk;
1401 /* find the fastest period of all the enabled devices */
1402 dev_msk = st->hal->src[src].dev_msk;
1403 for (i = 0; dev_msk; i++) {
1404 if (dev_msk & (1 << i)) {
1405 dev_msk &= ~(1 << i);
1406 if (st->snsr[i].enable && st->snsr[i].period_us) {
1407 if (st->snsr[i].period_us < period_us)
1408 period_us = st->snsr[i].period_us;
1415 if (period_us < st->src[src].period_us_min)
1416 period_us = st->src[src].period_us_min;
1417 if (period_us > st->src[src].period_us_max)
1418 period_us = st->src[src].period_us_max;
1419 if (period_us != st->src[src].period_us_req) {
1420 st->src[src].period_us_req = period_us;
1428 int nvi_period_aux(struct nvi_state *st)
1430 bool enabled = false;
1431 unsigned int period_us = -1;
1432 unsigned int timeout_us = -1;
1433 unsigned int msk_en;
1437 msk_en = st->snsr[DEV_AUX].enable | st->aux.dmp_en_msk;
1438 for (i = 0; msk_en; i++) {
1439 if (msk_en & (1 << i)) {
1440 msk_en &= ~(1 << i);
1441 if (st->aux.port[i].period_us) {
1442 if (st->aux.port[i].period_us < period_us)
1443 period_us = st->aux.port[i].period_us;
1444 if (st->aux.port[i].timeout_us < timeout_us)
1446 st->aux.port[i].timeout_us;
1453 st->snsr[DEV_AUX].period_us = period_us;
1454 st->snsr[DEV_AUX].timeout_us = timeout_us;
1456 ret = nvi_period_src(st, st->hal->dev[DEV_AUX]->src);
1457 ret |= nvi_timeout(st);
1461 static int nvi_period_all(struct nvi_state *st)
1466 for (src = 0; src < st->hal->src_n; src++) {
1467 if (st->hal->src[src].dev_msk & (1 << DEV_AUX))
1468 continue; /* run nvi_period_aux last for timeout */
1470 ret |= nvi_period_src(st, src);
1473 ret |= nvi_period_aux(st);
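/* nvi_en: central (re)enable path.  Raises power as required by the enabled
 * sensors, starts the DMP when the loaded firmware can service everything
 * that is enabled, otherwise programs the accel/gyro paths directly, then
 * drops back to NVI_PM_AUTO and resets FIFOs/timestamps via nvi_reset().
 */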
1477 static int nvi_en(struct nvi_state *st)
1479 bool dmp_en = false;
1485 if (st->snsr[DEV_GYR].enable) {
1486 ret_t = nvi_pm(st, __func__, NVI_PM_ON_FULL);
1490 for (i = 0; i < DEV_N_AUX; i++) {
1491 if (st->snsr[i].enable) {
1492 ret_t = nvi_pm(st, __func__, NVI_PM_ON);
1499 ret_t = nvi_pm(st, __func__, NVI_PM_AUTO);
1500 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1501 dev_info(&st->i2c->dev, "%s en_msk=%x err=%d\n",
1502 __func__, st->en_msk, ret_t);
1506 ret_t |= nvi_int_able(st, __func__, false);
1507 ret_t |= nvi_user_ctrl_en(st, __func__, false, false, false, false);
1509 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1510 dev_err(&st->i2c->dev, "%s en_msk=%x ERR=%d\n",
1511 __func__, st->en_msk, ret_t);
1515 if (st->en_msk & (1 << FW_LOADED)) {
1516 /* Test whether batch mode is needed or, more specifically, whether an
1517 * enabled sensor doesn't support batch. The DMP can't
1518 * do batch and non-batch at the same time. */
1520 if (st->bm_timeout_us) {
1523 /* batch disabled - test if a DMP sensor is enabled */
1524 for (i = 0; i < DEV_N_AUX; i++) {
1525 if (st->dmp_en_msk & (1 << i)) {
1526 if (st->snsr[i].enable) {
1535 ret_t |= st->hal->dmp->fn_en(st); /* nvi_dmp_en */
1536 st->en_msk |= (1 << DEV_DMP);
1538 /* reprogram for non-DMP mode below */
1540 if (st->sts & (NVS_STS_SPEW_MSG |
1542 dev_err(&st->i2c->dev,
1546 if (st->sts & (NVS_STS_SPEW_MSG |
1548 dev_info(&st->i2c->dev,
1549 "%s DMP enabled\n", __func__);
1554 if (st->en_msk & (1 << DEV_DMP)) {
1555 st->en_msk &= ~(MSK_DEV_SNSR | (1 << DEV_DMP));
1556 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1557 dev_info(&st->i2c->dev,
1558 "%s DMP disabled\n", __func__);
1559 if (st->aux.dmp_en_msk) {
1560 st->aux.dmp_en_msk = 0;
1561 nvi_aux_enable(st, __func__, true, true);
1563 for (i = 0; i < DEV_N_AUX; i++)
1564 st->snsr[i].odr = 0;
1566 for (i = 0; i < AUX_PORT_MAX; i++)
1567 st->aux.port[i].odr = 0;
1570 for (i = 0; i < st->hal->src_n; i++)
1571 ret_t |= st->hal->src[i].fn_period(st);
1573 if (st->snsr[DEV_ACC].enable) {
1574 ret = st->hal->fn->en_acc(st);
1577 st->en_msk &= ~(1 << DEV_ACC);
1579 st->en_msk |= (1 << DEV_ACC);
1582 if (st->snsr[DEV_GYR].enable) {
1583 ret = st->hal->fn->en_gyr(st);
1586 st->en_msk &= ~(1 << DEV_GYR);
1588 st->en_msk |= (1 << DEV_GYR);
1592 /* NVI_PM_AUTO to go to NVI_PM_ON_CYCLE if need be */
1593 /* this also restores the correct PM mode on error */
1594 ret_t |= nvi_pm(st, __func__, NVI_PM_AUTO);
1595 if (st->pm > NVI_PM_ON_CYCLE)
1596 ret_t |= nvi_reset(st, __func__, true, false, true);
1598 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG))
1599 dev_info(&st->i2c->dev, "%s en_msk=%x err=%d\n",
1600 __func__, st->en_msk, ret_t);
1604 static void nvi_aux_dbg(struct nvi_state *st, char *tag, int val)
1606 struct nvi_mpu_port *n;
1608 struct aux_ports *a;
1613 if (!(st->sts & NVI_DBG_SPEW_AUX))
1616 dev_info(&st->i2c->dev, "%s %s %d\n", __func__, tag, val);
1618 for (i = 0; i < AUX_PORT_IO; i++) {
1619 ret = nvi_i2c_rd(st, &st->hal->reg->i2c_slv_addr[i], &data[0]);
1620 ret |= nvi_i2c_rd(st, &st->hal->reg->i2c_slv_reg[i], &data[1]);
1621 ret |= nvi_i2c_rd(st, &st->hal->reg->i2c_slv_ctrl[i],
1623 ret |= nvi_i2c_rd(st, &st->hal->reg->i2c_slv_do[i], &data[3]);
1626 pr_info("HW: ERR=%d\n", ret);
1628 pr_info("HW: P%d AD=%x RG=%x CL=%x DO=%x\n",
1629 i, data[0], data[1], data[2], data[3]);
1630 /* HC = hardware register cache */
1631 pr_info("HC: P%d AD=%x RG=%x CL=%x DO=%x\n",
1632 i, st->rc.i2c_slv_addr[i], st->rc.i2c_slv_reg[i],
1633 st->rc.i2c_slv_ctrl[i], st->rc.i2c_slv_do[i]);
1634 n = &st->aux.port[i].nmp;
1635 /* NS = nmp structure */
1636 pr_info("NS: P%d AD=%x RG=%x CL=%x DO=%x MS=%u US=%u SB=%x\n",
1637 i, n->addr, n->reg, n->ctrl, n->data_out, n->delay_ms,
1638 st->aux.port[i].period_us, n->shutdown_bypass);
1639 p = &st->aux.port[i];
1640 /* PS = port structure */
1641 pr_info("PS: P%d EDO=%u ODR=%u DMP_CTRL=%x EN=%x HWDOUT=%x\n",
1642 i, p->ext_data_offset, p->odr,
1643 !!(a->dmp_ctrl_msk & (1 << i)),
1644 !!(st->snsr[DEV_AUX].enable & (1 << i)), p->hw_do);
1647 pr_info("AUX: EN=%x MEN=%x DEN=%x DLY=%x SRC=%u DN=%u BEN=%x BLK=%d\n",
1648 !!(st->en_msk & (1 << DEV_AUX)),
1649 !!(st->rc.user_ctrl & BIT_I2C_MST_EN), st->aux.dmp_en_msk,
1650 (st->rc.i2c_slv4_ctrl & BITS_I2C_MST_DLY),
1651 st->src[st->hal->dev[DEV_AUX]->src].period_us_src,
1652 a->ext_data_n, (st->rc.int_pin_cfg & BIT_BYPASS_EN),
1656 static void nvi_aux_ext_data_offset(struct nvi_state *st)
1659 unsigned int offset = 0;
1661 for (i = 0; i < AUX_PORT_IO; i++) {
1662 if (st->aux.port[i].nmp.addr & BIT_I2C_READ) {
1663 st->aux.port[i].ext_data_offset = offset;
1664 offset += (st->rc.i2c_slv_ctrl[i] &
1665 BITS_I2C_SLV_CTRL_LEN);
1668 if (offset > AUX_EXT_DATA_REG_MAX) {
1669 offset = AUX_EXT_DATA_REG_MAX;
1670 dev_err(&st->i2c->dev,
1671 "%s ERR MPU slaves exceed data storage\n", __func__);
1673 st->aux.ext_data_n = offset;
1677 static int nvi_aux_port_data_out(struct nvi_state *st,
1678 int port, u8 data_out)
1682 ret = nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv_do[port], data_out,
1683 NULL, &st->rc.i2c_slv_do[port]);
1685 st->aux.port[port].nmp.data_out = data_out;
1686 st->aux.port[port].hw_do = true;
1688 st->aux.port[port].hw_do = false;
1693 static int nvi_aux_port_wr(struct nvi_state *st, int port)
1695 struct aux_port *ap;
1698 ap = &st->aux.port[port];
1699 ret = nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv_addr[port],
1700 ap->nmp.addr, __func__, &st->rc.i2c_slv_addr[port]);
1701 ret |= nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv_reg[port], ap->nmp.reg,
1702 __func__, &st->rc.i2c_slv_reg[port]);
1703 ret |= nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv_do[port],
1704 ap->nmp.data_out, __func__, &st->rc.i2c_slv_do[port]);
1708 static int nvi_aux_port_en(struct nvi_state *st, int port, bool en)
1710 struct aux_port *ap;
1714 unsigned int dmp_ctrl_msk;
1717 ap = &st->aux.port[port];
1718 if (en && ap->nmp.addr != st->rc.i2c_slv_addr[port]) {
1719 ret = nvi_aux_port_wr(st, port);
1723 if (en && !ap->hw_do)
1724 nvi_aux_port_data_out(st, port, ap->nmp.data_out);
1725 if (port == AUX_PORT_IO) {
1726 ret = nvi_wr_i2c_slv4_ctrl(st, en);
1728 slv_ctrl = st->rc.i2c_slv_ctrl[port];
1730 dmp_ctrl_msk = st->aux.dmp_ctrl_msk;
1732 ctrl = ap->nmp.ctrl;
1733 if (ap->dd && st->en_msk & (1 << DEV_DMP)) {
1734 reg = ap->dd->dmp_rd_reg;
1735 if (ctrl != ap->dd->dmp_rd_ctrl) {
1736 ctrl = ap->dd->dmp_rd_ctrl;
1737 st->aux.dmp_ctrl_msk |= (1 << port);
1740 st->aux.dmp_ctrl_msk &= ~(1 << port);
1742 if (dmp_ctrl_msk != st->aux.dmp_ctrl_msk)
1743 /* AUX HW needs to be reset if slv_ctrl values
1744 * change in anything other than the enable bit. */
1746 st->aux.reset_i2c = true;
1747 ret = nvi_i2c_wr_rc(st,
1748 &st->hal->reg->i2c_slv_reg[port],
1750 &st->rc.i2c_slv_reg[port]);
1754 st->aux.dmp_ctrl_msk &= ~(1 << port);
1756 ret |= nvi_i2c_wr_rc(st, &st->hal->reg->i2c_slv_ctrl[port],
1757 ctrl, __func__, &st->rc.i2c_slv_ctrl[port]);
1758 if (slv_ctrl != st->rc.i2c_slv_ctrl[port])
1759 nvi_aux_ext_data_offset(st);
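/* nvi_aux_enable: global aux (I2C master) enable.  Honored only when bypass
 * mode is off and at least one aux port is enabled; the individual slave
 * ports are then enabled or disabled to match.
 */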
1764 int nvi_aux_enable(struct nvi_state *st, const char *fn,
1765 bool en_req, bool force)
1767 bool enable = en_req;
1768 bool enabled = false;
1770 unsigned int msk_en;
1774 if (st->rc.int_pin_cfg & BIT_BYPASS_EN)
1776 /* global enable is honored only if a port is enabled */
1777 msk_en = st->snsr[DEV_AUX].enable | st->aux.dmp_en_msk;
1780 if (st->en_msk & (1 << DEV_AUX))
1782 if (force || enable != enabled) {
1784 st->en_msk |= (1 << DEV_AUX);
1785 for (i = 0; i < AUX_PORT_MAX; i++) {
1786 if (msk_en & (1 << i))
1790 ret |= nvi_aux_port_en(st, i, en);
1793 st->en_msk &= ~(1 << DEV_AUX);
1794 for (i = 0; i < AUX_PORT_MAX; i++) {
1795 if (st->rc.i2c_slv_addr[i])
1796 nvi_aux_port_en(st, i, false);
1799 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG |
1801 dev_info(&st->i2c->dev,
1802 "%s-%s en_req=%x enabled: %x->%x err=%d\n",
1803 __func__, fn, en_req, enabled, enable, ret);
1808 static int nvi_aux_port_enable(struct nvi_state *st,
1809 unsigned int port_mask, bool en)
1811 unsigned int enabled;
1815 enabled = st->snsr[DEV_AUX].enable;
1817 st->snsr[DEV_AUX].enable |= port_mask;
1819 st->snsr[DEV_AUX].enable &= ~port_mask;
1820 if (enabled == st->snsr[DEV_AUX].enable)
1823 if (st->hal->dev[DEV_AUX]->fifo_en_msk) {
1825 for (i = 0; i < AUX_PORT_IO; i++) {
1826 if (port_mask & (1 << i)) {
1827 if (st->aux.port[i].nmp.addr & BIT_I2C_READ)
1828 st->aux.reset_fifo = true;
1832 if (en && (st->rc.int_pin_cfg & BIT_BYPASS_EN))
1836 for (i = 0; i < AUX_PORT_MAX; i++) {
1837 if (port_mask & (1 << i))
1838 ret |= nvi_aux_port_en(st, i, en);
1840 ret |= nvi_aux_enable(st, __func__, true, false);
1842 if (port_mask & ((1 << AUX_PORT_IO) - 1))
1847 static int nvi_aux_port_free(struct nvi_state *st, int port)
1849 memset(&st->aux.port[port], 0, sizeof(struct aux_port));
1850 st->snsr[DEV_AUX].enable &= ~(1 << port);
1851 st->aux.dmp_en_msk &= ~(1 << port);
1852 if (st->rc.i2c_slv_addr[port]) {
1853 nvi_aux_port_wr(st, port);
1854 nvi_aux_port_en(st, port, false);
1855 nvi_aux_enable(st, __func__, false, false);
1856 nvi_user_ctrl_en(st, __func__, false, false, false, false);
1857 nvi_aux_enable(st, __func__, true, false);
1858 if (port != AUX_PORT_IO)
1859 st->aux.reset_i2c = true;
1866 static int nvi_aux_port_alloc(struct nvi_state *st,
1867 struct nvi_mpu_port *nmp, int port)
1869 struct nvi_aux_port_dmp_dev *dd = NULL;
1870 struct nvi_dmp_aux_port *ap;
1874 if (st->aux.reset_i2c)
1875 nvi_reset(st, __func__, false, true, true);
1877 for (i = 0; i < AUX_PORT_IO; i++) {
1878 if (!st->aux.port[i].nmp.addr)
1879 /* port available */
1882 if (i < AUX_PORT_IO)
1887 if (st->aux.port[port].nmp.addr)
1892 /* override port setting if DMP used */
1893 if (st->hal->dmp && port < AUX_PORT_IO) {
1894 for (i = 0; i < st->hal->dmp->ap_n; i++) {
1895 ap = &st->hal->dmp->ap[i];
1896 if (nmp->type == ap->type && ap->port_rd ==
1897 (bool)(nmp->addr & BIT_I2C_READ)) {
1899 for (j = 0; j < ap->dd_n; j++) {
1900 if (nmp->id == ap->dd[j].dev) {
1906 /* device not supported */
1913 if (i < st->hal->dmp->ap_n && !st->aux.port[ap->port].nmp.addr)
1919 memset(&st->aux.port[port], 0, sizeof(struct aux_port));
1920 memcpy(&st->aux.port[port].nmp, nmp, sizeof(struct nvi_mpu_port));
1921 st->aux.port[port].dd = dd;
1922 st->aux.port[port].nmp.ctrl &= ~BIT_SLV_EN;
1923 st->aux.port[port].period_us = st->aux.port[port].nmp.period_us;
1927 static int nvi_aux_bypass_enable(struct nvi_state *st, bool en)
1932 if (en && (st->rc.int_pin_cfg & BIT_BYPASS_EN))
1935 val = st->rc.int_pin_cfg;
1937 ret = nvi_aux_enable(st, __func__, false, false);
1938 ret |= nvi_user_ctrl_en(st, __func__,
1939 false, false, false, false);
1941 val |= BIT_BYPASS_EN;
1942 ret = nvi_i2c_wr_rc(st, &st->hal->reg->int_pin_cfg,
1943 val, __func__, &st->rc.int_pin_cfg);
1946 val &= ~BIT_BYPASS_EN;
1947 ret = nvi_i2c_wr_rc(st, &st->hal->reg->int_pin_cfg, val,
1948 __func__, &st->rc.int_pin_cfg);
1950 nvi_aux_enable(st, __func__, true, false);
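/* nvi_aux_bypass_request: reference-counted bypass requests.  If the bypass
 * pin is already in the requested state the lock count is simply bumped; a
 * stale lock older than bypass_timeout_ms is dropped so a wedged client
 * can't hold the bus indefinitely.
 */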
1957 static int nvi_aux_bypass_request(struct nvi_state *st, bool enable)
1963 if ((bool)(st->rc.int_pin_cfg & BIT_BYPASS_EN) == enable) {
1964 st->aux.bypass_timeout_ns = nvs_timestamp();
1965 st->aux.bypass_lock++;
1966 if (!st->aux.bypass_lock)
1967 dev_err(&st->i2c->dev, "%s rollover ERR\n", __func__);
1969 if (st->aux.bypass_lock) {
1970 ns = nvs_timestamp() - st->aux.bypass_timeout_ns;
1971 to = st->bypass_timeout_ms;
1974 st->aux.bypass_lock = 0;
1978 if (!st->aux.bypass_lock) {
1979 ret = nvi_aux_bypass_enable(st, enable);
1981 dev_err(&st->i2c->dev, "%s ERR=%d\n",
1984 st->aux.bypass_lock++;
1990 static int nvi_aux_bypass_release(struct nvi_state *st)
1994 if (st->aux.bypass_lock)
1995 st->aux.bypass_lock--;
1996 if (!st->aux.bypass_lock) {
1997 ret = nvi_aux_bypass_enable(st, false);
1999 dev_err(&st->i2c->dev, "%s ERR=%d\n", __func__, ret);
2004 static int nvi_aux_dev_valid(struct nvi_state *st,
2005 struct nvi_mpu_port *nmp, u8 *data)
2011 /* turn off bypass */
2012 ret = nvi_aux_bypass_request(st, false);
2016 /* grab the special port */
2017 ret = nvi_aux_port_alloc(st, nmp, AUX_PORT_IO);
2018 if (ret != AUX_PORT_IO) {
2019 nvi_aux_bypass_release(st);
2023 /* enable it at fastest speed */
2024 st->aux.port[AUX_PORT_IO].nmp.delay_ms = 0;
2025 st->aux.port[AUX_PORT_IO].period_us =
2026 st->src[st->hal->dev[DEV_AUX]->src].period_us_min;
2027 ret = nvi_user_ctrl_en(st, __func__, false, false, false, false);
2028 ret |= nvi_aux_port_enable(st, 1 << AUX_PORT_IO, true);
2029 ret |= nvi_user_ctrl_en(st, __func__, false, false, true, false);
2031 nvi_aux_port_free(st, AUX_PORT_IO);
2032 nvi_aux_bypass_release(st);
2036 /* now turn off all the other ports for fastest response */
2037 for (i = 0; i < AUX_PORT_IO; i++) {
2038 if (st->rc.i2c_slv_addr[i])
2039 nvi_aux_port_en(st, i, false);
2041 /* start reading the results */
2042 for (i = 0; i < AUX_DEV_VALID_READ_LOOP_MAX; i++) {
2043 mdelay(AUX_DEV_VALID_READ_DELAY_MS);
2045 ret = nvi_i2c_rd(st, &st->hal->reg->i2c_mst_status, &val);
2052 /* these will restore all previously disabled ports */
2053 nvi_aux_bypass_release(st);
2054 nvi_aux_port_free(st, AUX_PORT_IO);
2055 if (i >= AUX_DEV_VALID_READ_LOOP_MAX)
2058 if (val & 0x10) /* NACK */
2061 if (nmp->addr & BIT_I2C_READ) {
2062 ret = nvi_i2c_rd(st, &st->hal->reg->i2c_slv4_di, &val);
2067 dev_info(&st->i2c->dev, "%s MPU read 0x%x from device 0x%x\n",
2068 __func__, val, (nmp->addr & ~BIT_I2C_READ));
2070 dev_info(&st->i2c->dev, "%s MPU found device 0x%x\n",
2071 __func__, (nmp->addr & ~BIT_I2C_READ));
2076 static int nvi_aux_mpu_call_pre(struct nvi_state *st, int port)
2078 if ((port < 0) || (port >= AUX_PORT_IO))
2081 if (st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND))
2084 if (!st->aux.port[port].nmp.addr)
2090 static int nvi_aux_mpu_call_post(struct nvi_state *st,
2095 nvi_aux_dbg(st, tag, ret);
2099 /* See the mpu_iio.h file for details on the nvi_mpu_ calls. */
2101 int nvi_mpu_dev_valid(struct nvi_mpu_port *nmp, u8 *data)
2103 struct nvi_state *st = nvi_state_local;
2107 if (st->sts & NVI_DBG_SPEW_AUX)
2108 pr_info("%s\n", __func__);
2110 pr_debug("%s ERR -EAGAIN\n", __func__);
2117 if ((nmp->addr & BIT_I2C_READ) && (data == NULL))
2121 if (!(st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND))) {
2122 nvi_pm(st, __func__, NVI_PM_ON);
2123 ret = nvi_aux_dev_valid(st, nmp, data);
2124 nvi_pm(st, __func__, NVI_PM_AUTO);
2125 nvi_aux_dbg(st, "nvi_mpu_dev_valid=", ret);
2127 nvi_mutex_unlock(st);
2130 EXPORT_SYMBOL(nvi_mpu_dev_valid);
2132 int nvi_mpu_port_alloc(struct nvi_mpu_port *nmp)
2134 struct nvi_state *st = nvi_state_local;
2138 if (st->sts & NVI_DBG_SPEW_AUX)
2139 pr_info("%s\n", __func__);
2141 pr_debug("%s ERR -EAGAIN\n", __func__);
2145 if (nmp == NULL || !(nmp->ctrl & BITS_I2C_SLV_CTRL_LEN))
2148 if (nmp->addr & BIT_I2C_READ && !nmp->handler)
2152 if (!(st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND))) {
2153 nvi_pm(st, __func__, NVI_PM_ON);
2154 ret = nvi_aux_port_alloc(st, nmp, -1);
2155 if (ret >= 0 && st->hal->dmp)
2156 /* need to reinitialize DMP for new device */
2157 st->hal->dmp->fn_init(st);
2158 nvi_pm(st, __func__, NVI_PM_AUTO);
2159 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_port_alloc=", ret);
2161 nvi_mutex_unlock(st);
2164 EXPORT_SYMBOL(nvi_mpu_port_alloc);
2166 int nvi_mpu_port_free(int port)
2168 struct nvi_state *st = nvi_state_local;
2172 if (st->sts & NVI_DBG_SPEW_AUX)
2173 pr_info("%s port %d\n", __func__, port);
2175 pr_debug("%s port %d ERR -EAGAIN\n", __func__, port);
2180 ret = nvi_aux_mpu_call_pre(st, port);
2182 nvi_pm(st, __func__, NVI_PM_ON);
2183 ret = nvi_aux_port_free(st, port);
2184 nvi_pm(st, __func__, NVI_PM_AUTO);
2185 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_port_free=", ret);
2187 nvi_mutex_unlock(st);
2190 EXPORT_SYMBOL(nvi_mpu_port_free);
2192 int nvi_mpu_enable(unsigned int port_mask, bool enable)
2194 struct nvi_state *st = nvi_state_local;
2199 if (st->sts & NVI_DBG_SPEW_AUX)
2200 pr_info("%s port_mask 0x%x: %x\n",
2201 __func__, port_mask, enable);
2203 pr_debug("%s port_mask 0x%x: %x ERR -EAGAIN\n",
2204 __func__, port_mask, enable);
2208 if (port_mask >= (1 << AUX_PORT_IO) || !port_mask)
2211 for (i = 0; i < AUX_PORT_IO; i++) {
2212 if (port_mask & (1 << i)) {
2213 if (!st->aux.port[i].nmp.addr)
2219 if (st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND)) {
2222 nvi_pm(st, __func__, NVI_PM_ON);
2223 ret = nvi_aux_port_enable(st, port_mask, enable);
2224 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_enable=", ret);
2226 nvi_mutex_unlock(st);
2229 EXPORT_SYMBOL(nvi_mpu_enable);
2231 int nvi_mpu_delay_ms(int port, u8 delay_ms)
2233 struct nvi_state *st = nvi_state_local;
2237 if (st->sts & NVI_DBG_SPEW_AUX)
2238 pr_info("%s port %d: %u\n", __func__, port, delay_ms);
2240 pr_debug("%s port %d: %u ERR -EAGAIN\n",
2241 __func__, port, delay_ms);
2246 ret = nvi_aux_mpu_call_pre(st, port);
2248 st->aux.port[port].nmp.delay_ms = delay_ms;
2249 if (st->rc.i2c_slv_ctrl[port] & BIT_SLV_EN)
2250 ret = nvi_aux_delay(st, __func__);
2251 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_delay_ms=", ret);
2253 nvi_mutex_unlock(st);
2256 EXPORT_SYMBOL(nvi_mpu_delay_ms);
2258 int nvi_mpu_data_out(int port, u8 data_out)
2260 struct nvi_state *st = nvi_state_local;
2266 ret = nvi_aux_mpu_call_pre(st, port);
2268 if (st->rc.i2c_slv_ctrl[port] & BIT_SLV_EN) {
2269 ret = nvi_aux_port_data_out(st, port, data_out);
2271 st->aux.port[port].nmp.data_out = data_out;
2272 st->aux.port[port].hw_do = false;
2279 EXPORT_SYMBOL(nvi_mpu_data_out);
2281 int nvi_mpu_batch(int port, unsigned int period_us, unsigned int timeout_us)
2283 struct nvi_state *st = nvi_state_local;
2287 if (st->sts & NVI_DBG_SPEW_AUX)
2288 pr_info("%s port %d: p=%u t=%u\n",
2289 __func__, port, period_us, timeout_us);
2291 pr_debug("%s port %d: p=%u t=%u ERR -EAGAIN\n",
2292 __func__, port, period_us, timeout_us);
2297 ret = nvi_aux_mpu_call_pre(st, port);
2299 if (timeout_us && ((st->aux.port[port].nmp.id == ID_INVALID) ||
2300 (st->aux.port[port].nmp.id >= ID_INVALID_END))) {
2301 /* sensor not supported by DMP */
2304 st->aux.port[port].period_us = period_us;
2305 st->aux.port[port].timeout_us = timeout_us;
2306 ret = nvi_period_aux(st);
2307 if (st->en_msk & (1 << DEV_DMP) &&
2308 st->hal->dmp->fn_dev_batch) {
2309 /* batch can be done real-time with DMP on */
2311 ret = st->hal->dmp->fn_dev_batch(st, DEV_AUX,
2315 /* timings changed */
2318 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_batch=", ret);
2321 nvi_mutex_unlock(st);
2324 EXPORT_SYMBOL(nvi_mpu_batch);
2326 int nvi_mpu_flush(int port)
2328 struct nvi_state *st = nvi_state_local;
2332 if (st->sts & NVI_DBG_SPEW_AUX)
2333 pr_info("%s port %d\n", __func__, port);
2335 pr_debug("%s port %d ERR -EAGAIN\n", __func__, port);
2340 ret = nvi_aux_mpu_call_pre(st, port);
2342 if (st->hal->dev[DEV_AUX]->fifo_en_msk) {
2343 /* HW flush only when FIFO is used for AUX */
2344 st->aux.port[port].flush = true;
2345 ret = nvi_read(st, true);
2347 nvi_flush_aux(st, port);
2349 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_flush=", ret);
2351 nvi_mutex_unlock(st);
2354 EXPORT_SYMBOL(nvi_mpu_flush);
2356 int nvi_mpu_info(int read_port, struct nvi_mpu_inf *inf)
2358 struct nvi_state *st = nvi_state_local;
2359 struct nvi_aux_port_dmp_dev *dd;
2364 if (st->sts & NVI_DBG_SPEW_AUX)
2365 pr_info("%s port %d\n", __func__, read_port);
2367 pr_debug("%s port %d ERR -EAGAIN\n", __func__, read_port);
2375 ret = nvi_aux_mpu_call_pre(st, read_port);
2377 i = st->hal->dev[DEV_AUX]->src;
2378 inf->period_us_min = st->src[i].period_us_min;
2379 inf->period_us_max = st->src[i].period_us_max;
2380 /* batch not supported at this time */
2381 inf->fifo_reserve = 0;
2383 dd = st->aux.port[read_port].dd;
2385 inf->dmp_rd_len_sts = dd->dmp_rd_len_sts;
2386 inf->dmp_rd_len_data = dd->dmp_rd_len_data;
2387 inf->dmp_rd_be_sts = dd->dmp_rd_be_sts;
2388 inf->dmp_rd_be_data = dd->dmp_rd_be_data;
2390 inf->dmp_rd_len_sts = 0;
2391 inf->dmp_rd_len_data = 0;
2392 inf->dmp_rd_be_sts = false;
2393 inf->dmp_rd_be_data = false;
2395 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_info=", 0);
2397 nvi_mutex_unlock(st);
2400 EXPORT_SYMBOL(nvi_mpu_info);
2402 int nvi_mpu_bypass_request(bool enable)
2404 struct nvi_state *st = nvi_state_local;
2408 if (st->sts & NVI_DBG_SPEW_AUX)
2409 pr_info("%s enable=%x\n", __func__, enable);
2411 pr_debug("%s ERR -EAGAIN\n", __func__);
2416 if (!(st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND))) {
2417 nvi_pm(st, __func__, NVI_PM_ON);
2418 ret = nvi_aux_bypass_request(st, enable);
2419 nvi_pm(st, __func__, NVI_PM_AUTO);
2420 ret = nvi_aux_mpu_call_post(st, "nvi_mpu_bypass_request=",
2423 nvi_mutex_unlock(st);
2426 EXPORT_SYMBOL(nvi_mpu_bypass_request);
2428 int nvi_mpu_bypass_release(void)
2430 struct nvi_state *st = nvi_state_local;
2433 if (st->sts & NVI_DBG_SPEW_AUX)
2434 pr_info("%s\n", __func__);
2436 pr_debug("%s\n", __func__);
2441 if (!(st->sts & (NVS_STS_SHUTDOWN | NVS_STS_SUSPEND))) {
2442 nvi_pm(st, __func__, NVI_PM_ON);
2443 nvi_aux_bypass_release(st);
2444 nvi_pm(st, __func__, NVI_PM_AUTO);
2445 nvi_aux_mpu_call_post(st, "nvi_mpu_bypass_release", 0);
2447 nvi_mutex_unlock(st);
2450 EXPORT_SYMBOL(nvi_mpu_bypass_release);
2453 int nvi_reset(struct nvi_state *st, const char *fn,
2454 bool rst_fifo, bool rst_i2c, bool en_irq)
2458 bool rst_dmp = false;
2462 ret = nvi_int_able(st, __func__, false);
2464 if (rst_i2c || st->aux.reset_i2c) {
2465 st->aux.reset_i2c = false;
2467 ret |= nvi_aux_enable(st, __func__, false, false);
2468 val |= BIT_I2C_MST_RST;
2471 st->aux.reset_fifo = false;
2472 val |= BIT_FIFO_RST;
2473 if (st->en_msk & (1 << DEV_DMP)) {
2476 ret |= nvi_aux_enable(st, __func__, false, false);
2479 ret |= nvi_user_ctrl_en(st, __func__,
2480 !rst_fifo, !rst_fifo, !rst_i2c, false);
2481 val |= st->rc.user_ctrl;
2482 ret |= nvi_user_ctrl_rst(st, val);
2483 if (rst_i2c || rst_dmp)
2484 ret |= nvi_aux_enable(st, __func__, true, false);
2485 ts = nvs_timestamp();
2487 for (i = 0; i < st->hal->src_n; i++) {
2488 st->src[i].ts_reset = true;
2489 st->src[i].ts_1st = ts;
2490 st->src[i].ts_end = ts;
2491 st->src[i].ts_period = st->src[i].period_us_src * 1000;
2494 for (i = 0; i < DEV_N_AUX; i++) {
2495 st->snsr[i].ts_reset = true;
2496 st->snsr[i].ts_last = ts;
2497 st->snsr[i].ts_n = 0;
2500 for (i = 0; i < AUX_PORT_MAX; i++) {
2501 st->aux.port[i].ts_reset = true;
2502 st->aux.port[i].ts_last = ts;
2507 ret |= st->hal->dmp->fn_clk_n(st, &st->dmp_clk_n);
2508 st->src[SRC_DMP].ts_reset = true;
2509 st->src[SRC_DMP].ts_1st = ts;
2510 st->src[SRC_DMP].ts_end = ts;
2511 st->src[SRC_DMP].ts_period =
2512 st->src[SRC_DMP].period_us_src * 1000;
2516 ret |= nvi_user_ctrl_en(st, __func__, true, true, true, en_irq);
2517 if (st->sts & (NVS_STS_SPEW_MSG | NVI_DBG_SPEW_MSG |
2518 NVI_DBG_SPEW_FIFO | NVI_DBG_SPEW_TS))
2519 dev_info(&st->i2c->dev,
2520 "%s-%s DMP=%x FIFO=%x I2C=%x ts=%lld err=%d\n",
2521 __func__, fn, rst_dmp, rst_fifo, rst_i2c, ts, ret);
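/* nvi_ts_dev: synthesizes the timestamp for one sample of a device (or aux
 * port): step forward from the last timestamp by the source period (scaled
 * by the ODR divider), clamp it so it never passes ts_now, and never let it
 * move backward.
 */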
2525 s64 nvi_ts_dev(struct nvi_state *st, s64 ts_now,
2526 unsigned int dev, unsigned int aux_port)
2532 if (st->en_msk & (1 << DEV_DMP))
2535 src = st->hal->dev[dev]->src;
2540 ts = nvs_timestamp();
2542 if (dev == DEV_AUX && aux_port < AUX_PORT_MAX) {
2543 if (st->aux.port[aux_port].ts_reset) {
2544 st->aux.port[aux_port].ts_reset = false;
2545 ts = st->src[src].ts_1st;
2547 ts = st->src[src].ts_period;
2548 if (st->aux.port[aux_port].odr)
2549 ts *= (st->aux.port[aux_port].odr + 1);
2550 ts += st->aux.port[aux_port].ts_last;
2553 if (st->snsr[dev].ts_reset) {
2554 st->snsr[dev].ts_reset = false;
2555 ts = st->src[src].ts_1st;
2557 ts = st->src[src].ts_period;
2558 if (st->snsr[dev].odr)
2559 ts *= (st->snsr[dev].odr + 1);
2560 ts += st->snsr[dev].ts_last;
2564 if (st->sts & (NVI_DBG_SPEW_FIFO | NVI_DBG_SPEW_TS))
2565 dev_info(&st->i2c->dev,
2566 "%s ts > ts_now (%lld > %lld)\n",
2567 __func__, ts, ts_now);
2571 if (dev == DEV_AUX && aux_port < AUX_PORT_MAX) {
2572 if (ts < st->aux.port[aux_port].ts_last)
2575 st->aux.port[aux_port].ts_last = ts;
2577 if (ts < st->snsr[dev].ts_last)
2580 st->snsr[dev].ts_last = ts;
2582 if (ts < st->snsr[dev].ts_push_delay)
2584 if (st->sts & NVI_DBG_SPEW_FIFO && src >= 0)
2585 dev_info(&st->i2c->dev,
2586 "src[%d] ts_period=%lld ts_end=%lld %s ts[%u]=%lld\n",
2587 src, st->src[src].ts_period, st->src[src].ts_end,
2588 st->snsr[dev].cfg.name, st->snsr[dev].ts_n, ts);
2589 st->snsr[dev].ts_n++;
2593 static void nvi_aux_rd(struct nvi_state *st)
2597 struct aux_port *ap;
2602 if ((!st->aux.ext_data_n) || (!(st->rc.user_ctrl & BIT_I2C_MST_EN)))
2605 ret = nvi_i2c_r(st, st->hal->reg->ext_sens_data_00.bank,
2606 st->hal->reg->ext_sens_data_00.reg,
2607 st->aux.ext_data_n, (u8 *)&st->aux.ext_data);
2611 ts = nvi_ts_dev(st, 0, DEV_AUX, -1);
2612 for (i = 0; i < AUX_PORT_IO; i++) {
2613 ap = &st->aux.port[i];
2614 if ((st->rc.i2c_slv_ctrl[i] & BIT_SLV_EN) &&
2615 ap->nmp.addr & BIT_I2C_READ) {
2616 p = &st->aux.ext_data[ap->ext_data_offset];
2617 len = ap->nmp.ctrl & BITS_I2C_SLV_CTRL_LEN;
2618 ap->nmp.handler(p, len, ts, ap->nmp.ext_driver);
2623 static s32 nvi_matrix(struct nvi_state *st, signed char *matrix,
2624 s32 x, s32 y, s32 z, unsigned int axis)
2626 return ((matrix[0 + axis] == 1 ? x :
2627 (matrix[0 + axis] == -1 ? -x : 0)) +
2628 (matrix[3 + axis] == 1 ? y :
2629 (matrix[3 + axis] == -1 ? -y : 0)) +
2630 (matrix[6 + axis] == 1 ? z :
2631 (matrix[6 + axis] == -1 ? -z : 0)));
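/* nvi_push: converts one raw big-endian sample into the little-endian
 * channel layout the NVS framework expects (per-channel size, optional
 * shift, optional mounting-matrix swap, appended status bytes) and hands it
 * to the NVS handler with the given timestamp.
 */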
2634 int nvi_push(struct nvi_state *st, unsigned int dev, u8 *buf, s64 ts)
2641 unsigned int buf_le_i;
2648 ch_sz = abs(st->snsr[dev].cfg.ch_sz);
2650 if (st->snsr[dev].buf_n) {
2651 n = st->snsr[dev].buf_n / st->snsr[dev].cfg.ch_n;
2652 m = st->snsr[dev].buf_n % st->snsr[dev].cfg.ch_n;
2658 /* convert big endian byte stream to little endian channel data */
2659 for (ch = 0; ch < st->snsr[dev].cfg.ch_n; ch++) {
2661 if (st->snsr[dev].enable & (1 << ch)) {
2662 if (m && ch == (st->snsr[dev].cfg.ch_n - 1)) {
2663 /* handle last channel misalignment */
2664 for (i = 0; i < m; i++) {
2666 val_le[ch] |= (u8)*buf++;
2668 /* extend sign bit */
2669 i = (sizeof(val_le[ch]) - m) * 8;
2673 for (i = 0; i < n; i++) {
2675 val_le[ch] |= (u8)*buf++;
2677 /* extend sign bit */
2678 i = (sizeof(val_le[ch]) - n) * 8;
2687 /* shift HW data size to channel size if needed */
2688 if (st->snsr[dev].buf_shft) {
2689 if (st->snsr[dev].buf_shft < 0) {
2690 n = abs(st->snsr[dev].buf_shft);
2691 for (ch = 0; ch < st->snsr[dev].cfg.ch_n; ch++)
2694 for (ch = 0; ch < st->snsr[dev].cfg.ch_n; ch++)
2695 val_le[ch] <<= st->snsr[dev].buf_shft;
2699 /* apply matrix if needed */
2700 if (st->snsr[dev].matrix) {
2701 for (ch = 0; ch < AXIS_N; ch++)
2702 val[ch] = val_le[ch];
2704 for (ch = 0; ch < AXIS_N; ch++)
2705 val_le[ch] = nvi_matrix(st, st->snsr[dev].cfg.matrix,
2706 val[AXIS_X], val[AXIS_Y],
2710 /* convert little endian channel data to little endian byte stream */
2712 for (ch = 0; ch < st->snsr[dev].cfg.ch_n; ch++) {
2713 u_val = (u32)val_le[ch];
2714 for (i = 0; i < ch_sz; i++) {
2715 buf_le[buf_le_i + i] = (u8)(u_val & 0xFF);
2721 /* add status if needed (no endian conversion) */
2722 if (buf_le_i < st->snsr[dev].cfg.snsr_data_n) {
2723 n = st->snsr[dev].cfg.snsr_data_n - buf_le_i;
2724 u_val = st->snsr[dev].sts;
2725 for (i = 0; i < n; i++) {
2726 buf_le[buf_le_i + i] = (u8)(u_val & 0xFF);
2732 if (st->sts & (NVI_DBG_SPEW_SNSR << dev)) {
2734 st->sts |= NVS_STS_SPEW_DATA;
2735 st->nvs->handler(st->snsr[dev].nvs_st, buf_le, ts);
2736 if (!(sts & NVS_STS_SPEW_DATA))
2737 st->sts &= ~NVS_STS_SPEW_DATA;
2739 st->nvs->handler(st->snsr[dev].nvs_st, buf_le, ts);
2745 static int nvi_push_event(struct nvi_state *st, unsigned int dev)
2747 s64 ts = nvs_timestamp();
2752 if (st->sts & (NVI_DBG_SPEW_SNSR << dev)) {
2754 st->sts |= NVS_STS_SPEW_DATA;
2755 ret = st->nvs->handler(st->snsr[dev].nvs_st, &val, ts);
2756 if (!(sts & NVS_STS_SPEW_DATA))
2757 st->sts &= ~NVS_STS_SPEW_DATA;
2759 ret = st->nvs->handler(st->snsr[dev].nvs_st, &val, ts);
2764 static int nvi_push_oneshot(struct nvi_state *st, unsigned int dev)
2766 /* disable now to avoid reinitialization on handler's disable */
2767 st->snsr[dev].enable = 0;
2768 st->en_msk &= ~(1 << dev);
2769 return nvi_push_event(st, dev);
2772 static int nvi_dev_rd(struct nvi_state *st, unsigned int dev)
2778 if (!st->snsr[dev].enable)
2781 len = st->snsr[dev].cfg.ch_n << 1;
2782 ret = nvi_i2c_r(st, st->hal->reg->out_h[dev].bank,
2783 st->hal->reg->out_h[dev].reg, len, buf);
2785 ret = nvi_push(st, dev, buf, nvi_ts_dev(st, 0, dev, 0));
2789 static int nvi_fifo_aux(struct nvi_state *st, s64 ts, unsigned int n)
2791 struct aux_port *ap;
2792 unsigned int fifo_data_n;
2795 ts = nvi_ts_dev(st, ts, DEV_AUX, -1);
2796 for (port = 0; port < AUX_PORT_IO; port++) {
2797 ap = &st->aux.port[port];
2798 if (st->rc.fifo_en & (1 << st->hal->bit->slv_fifo_en[port])) {
2799 fifo_data_n = ap->nmp.ctrl & BITS_I2C_SLV_CTRL_LEN;
2800 if (fifo_data_n > n)
2803 ap->nmp.handler(&st->buf[st->buf_i], fifo_data_n, ts,
2804 ap->nmp.ext_driver);
2805 st->buf_i += fifo_data_n;
2808 if (st->sts & (NVS_STS_SUSPEND | NVS_STS_SHUTDOWN))
2815 static int nvi_fifo_dev_rd(struct nvi_state *st, s64 ts, unsigned int n,
2818 if (st->sts & (NVS_STS_SUSPEND | NVS_STS_SHUTDOWN))
2821 if (st->hal->dev[dev]->fifo_data_n > n)
2824 nvi_push(st, dev, &st->buf[st->buf_i], nvi_ts_dev(st, ts, dev, 0));
2825 st->buf_i += st->hal->dev[dev]->fifo_data_n;
2829 static int nvi_fifo_dev(struct nvi_state *st, s64 ts, unsigned int n)
2834 dev = st->hal->fifo_dev[(st->rc.fifo_cfg >> 2) & 0x07];
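/* fifo_cfg bits [4:2] select which device the currently
 * configured FIFO belongs to
 */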
2836 ret = nvi_fifo_aux(st, ts, n);
2838 ret = nvi_fifo_dev_rd(st, ts, n, dev);
2842 static int nvi_fifo_devs(struct nvi_state *st, s64 ts, unsigned int n)
2847 for (dev = 0; dev < DEV_MPU_N; dev++) {
2848 if (st->rc.fifo_en & st->hal->dev[dev]->fifo_en_msk) {
2849 ret = nvi_fifo_dev_rd(st, ts, n, dev);
2855 if (st->rc.fifo_en & st->hal->dev[DEV_AUX]->fifo_en_msk)
2856 ret = nvi_fifo_aux(st, ts, n);
2860 /* fifo_n_max can be used if we want to round-robin FIFOs */
2861 static int nvi_fifo_rd(struct nvi_state *st, int src, unsigned int fifo_n_max,
2862 int (*fn)(struct nvi_state *st, s64 ts, unsigned int n))
2871 unsigned int fifo_n;
2875 ts_end = nvs_timestamp();
2878 ret = st->hal->dmp->fn_clk_n(st, &dmp_clk_n);
2879 ret |= nvi_i2c_rd(st, &st->hal->reg->fifo_count_h, (u8 *)&fifo_count);
2880 if (ret || !fifo_count)
2883 ts_now = nvs_timestamp();
2884 if (ts_now < (ts_end + 5000000))
2888 ts_end = atomic64_read(&st->ts_irq);
2889 fifo_n = (unsigned int)be16_to_cpu(fifo_count);
2890 if (st->sts & NVS_STS_SPEW_IRQ)
2891 dev_info(&st->i2c->dev,
2892 "src=%d sync=%x fifo_n=%u ts_clk_n=%u ts_diff=%lld\n",
2893 src, sync, fifo_n, dmp_clk_n, ts_now - st->ts_now);
2894 st->ts_now = ts_now;
2897 if (dmp_clk_n > st->dmp_clk_n)
2898 ts_n = dmp_clk_n - st->dmp_clk_n;
2900 /* counter rolled over */
2901 ts_n = (~st->dmp_clk_n + 1) + dmp_clk_n;
2902 /* ts_n is the number of DMP clock ticks since last time */
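/* (~st->dmp_clk_n + 1) is the two's-complement negation, so the
 * rollover case still yields dmp_clk_n - st->dmp_clk_n modulo
 * the counter width
 */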
2903 st->dmp_clk_n = dmp_clk_n;
2905 fifo_n_max = 0; /* DMP disables round-robin FIFOs */
2908 ts_n = fifo_n / st->src[src].fifo_data_n; /* timestamps needed */
2909 if ((fifo_n % st->src[src].fifo_data_n) || !ts_n)
2910 /* reset the FIFO if the count doesn't divide cleanly */
2915 ts_period = st->src[src].period_us_src;
2917 if (sync && ts_end > st->src[src].ts_end && ts_end < ts_now &&
2918 ts_end > (ts_now - (ts_period >> 2)))
2919 /* ts_irq falls within the current period so sync ts_now to the IRQ */
2921 if (st->src[src].ts_reset) {
2922 st->src[src].ts_reset = false;
2923 ts_end = st->src[src].ts_period * (ts_n - 1);
2925 st->src[src].ts_1st = ts_now - ts_end;
2926 st->src[src].ts_end = st->src[src].ts_1st;
2929 ts_end = st->src[src].ts_period * ts_n;
2931 ts_end += st->src[src].ts_end;
2932 /* ts_now will be sent to nvi_ts_dev where the timestamp is
2933 * prevented from going into the future, which allows some
2934 * tolerance here for ts_end being a little later than ts_now.
2935 * The more tolerance we allow, the less often the period is
2936 * recalculated, avoiding oscillation around the true period.
2937 * Plus, the clamp on ts_now in nvi_ts_dev has the benefit of
2938 * "syncing" with the current calculations per device.
2940 if (ts_end > (ts_now + (ts_period >> 3)) || (sync && (ts_end <
2941 (ts_now - (ts_period >> 1))))) {
2942 if (st->sts & (NVI_DBG_SPEW_FIFO | NVI_DBG_SPEW_TS)) {
2943 dev_info(&st->i2c->dev,
2944 "sync=%x now=%lld end=%lld ts_n=%u\n",
2945 sync, ts_now, ts_end, ts_n);
2946 dev_info(&st->i2c->dev,
2947 "src=%d old period=%lld end=%lld\n",
2948 src, st->src[src].ts_period,
2949 st->src[src].ts_end);
2951 /* st->src[src].ts_period needs to be adjusted */
2952 ts_period = ts_now - st->src[src].ts_end;
2953 do_div(ts_period, ts_n);
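/* do_div() divides ts_period in place by ts_n, spreading the
 * elapsed time evenly across the pending samples
 */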
2954 st->src[src].ts_period = ts_period;
2955 ts_end = ts_period * ts_n;
2956 ts_end += st->src[src].ts_end;
2957 if (st->sts & (NVI_DBG_SPEW_FIFO | NVI_DBG_SPEW_TS))
2958 dev_info(&st->i2c->dev,
2959 "src=%d new period=%lld end=%lld\n",
2960 src, ts_period, ts_end);
2963 /* only applies to FIFO timing (non-DMP) */
2964 if (fifo_n_max < fifo_n) {
2965 fifo_n = fifo_n_max;
2966 ts_n = fifo_n / st->src[src].fifo_data_n;
2967 ts_end = st->src[src].ts_period * ts_n;
2968 ts_end += st->src[src].ts_end;
2971 st->src[src].ts_end = ts_end;
2973 /* unable to calculate the timestamp */
2978 buf_n = sizeof(st->buf) - st->buf_i;
2981 ret = nvi_i2c_r(st, st->hal->reg->fifo_rw.bank,
2982 st->hal->reg->fifo_rw.reg,
2983 buf_n, &st->buf[st->buf_i]);
2990 /* fn updates st->buf_i */
2991 while (st->buf_i < buf_n) {
2992 ret = fn(st, ts_now, buf_n - st->buf_i);
2993 /* ret < 0: error; exit
2994 * ret = 0: not enough data to process
2995 * ret > 0: all done processing data
3003 memcpy(st->buf, &st->buf[st->buf_i], buf_n);
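/* carry any bytes fn() could not consume to the front of
 * st->buf for the next pass
 */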
3015 static int nvi_rd(struct nvi_state *st)
3023 if (st->en_msk & (1 << DEV_DMP)) {
3024 if (st->en_msk & ((1 << DEV_SM) | (1 << DEV_STP))) {
3025 ret = nvi_i2c_rd(st, &st->hal->reg->int_dmp, &val);
3026 if (val & (1 << st->hal->bit->dmp_int_sm))
3027 nvi_push_oneshot(st, DEV_SM);
3028 if (val & (1 << st->hal->bit->dmp_int_stp))
3029 nvi_push_event(st, DEV_STP);
3031 if (st->en_msk & st->dmp_en_msk)
3033 return nvi_fifo_rd(st, -1, 0, st->hal->dmp->fn_rd);
3039 if (st->pm == NVI_PM_ON_CYCLE) {
3040 /* only low power accelerometer data */
3041 nvi_pm(st, __func__, NVI_PM_ON);
3042 ret = nvi_dev_rd(st, DEV_ACC);
3043 nvi_pm(st, __func__, NVI_PM_AUTO);
3047 nvi_dev_rd(st, DEV_TMP);
3048 if (!(st->rc.fifo_en & st->hal->dev[DEV_AUX]->fifo_en_msk))
3050 /* handle FIFO-enabled data */
3051 if (st->rc.fifo_cfg & 0x01) {
3052 /* multiple FIFOs enabled */
3053 int_msk = 1 << st->hal->bit->int_data_rdy_0;
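/* each FIFO has its own data ready interrupt bit starting at
 * int_data_rdy_0, so the mask is shifted per FIFO below
 */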
3054 for (fifo = 0; fifo < st->hal->fifo_n; fifo++) {
3055 if (st->rc.int_enable & (int_msk << fifo)) {
3056 ret = nvi_wr_fifo_cfg(st, fifo);
3060 src = st->hal->dev[st->hal->
3061 fifo_dev[fifo]]->src;
3062 ret = nvi_fifo_rd(st, src, 0, nvi_fifo_dev);
3063 if (st->buf_i || (ret < 0)) {
3064 /* HW FIFO misalignment - reset */
3071 /* st->fifo_src is either SRC_MPU or the source for the single
3072 * device enabled for the single FIFO in ICM.
3074 ret = nvi_fifo_rd(st, st->fifo_src, 0, nvi_fifo_devs);
3075 if (st->buf_i || (ret < 0)) {
3076 /* HW FIFO misalignment - reset */
3085 static int nvi_read(struct nvi_state *st, bool flush)
3089 if (st->irq_dis && !(st->sts & NVS_STS_SHUTDOWN)) {
3090 dev_err(&st->i2c->dev, "%s ERR: IRQ storm reset. n=%u\n",
3091 __func__, st->irq_storm_n);
3092 st->irq_storm_n = 0;
3093 nvi_pm(st, __func__, NVI_PM_ON);
3094 nvi_wr_pm1(st, __func__, BIT_H_RESET);
3097 } else if (!(st->sts & (NVS_STS_SUSPEND | NVS_STS_SHUTDOWN))) {
3100 nvi_en(st); /* a little harder reset for ICM DMP */
3102 nvi_reset(st, __func__, true, false, true);
3109 static irqreturn_t nvi_thread(int irq, void *dev_id)
3111 struct nvi_state *st = (struct nvi_state *)dev_id;
3114 nvi_read(st, false);
3115 nvi_mutex_unlock(st);
3119 static irqreturn_t nvi_handler(int irq, void *dev_id)
3121 struct nvi_state *st = (struct nvi_state *)dev_id;
3122 u64 ts = nvs_timestamp();
3123 u64 ts_old = atomic64_xchg(&st->ts_irq, ts);
3124 u64 ts_diff = ts - ts_old;
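/* ts_old comes from an atomic exchange, so ts_diff reflects the
 * true gap since the previous IRQ for the storm check below
 */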
3126 /* test for MPU IRQ storm problem */
3127 if (ts_diff < NVI_IRQ_STORM_MIN_NS) {
3129 if (st->irq_storm_n > NVI_IRQ_STORM_MAX_N)
3130 nvi_disable_irq(st);
3132 st->irq_storm_n = 0;
3135 if (st->sts & NVS_STS_SPEW_IRQ)
3136 dev_info(&st->i2c->dev, "%s ts=%llu ts_diff=%llu irq_dis=%x\n",
3137 __func__, ts, ts_diff, st->irq_dis);
3138 return IRQ_WAKE_THREAD;
3141 static int nvi_enable(void *client, int snsr_id, int enable)
3143 struct nvi_state *st = (struct nvi_state *)client;
3146 /* return current enable request status */
3147 return st->snsr[snsr_id].enable;
3149 if (st->snsr[snsr_id].enable == enable)
3150 /* nothing has changed with enable request */
3153 st->snsr[snsr_id].enable = enable;
3155 /* officially flagged as off here */
3156 st->en_msk &= ~(1 << snsr_id);
3157 if (st->sts & NVS_STS_SUSPEND)
3158 /* speed up suspend/resume by not doing nvi_en for every dev */
3161 if (snsr_id == DEV_TMP)
3162 /* this is a static sensor that will be read when gyro is on */
3165 if (st->en_msk & (1 << DEV_DMP)) {
3166 /* DMP is currently on */
3167 if (!(st->en_msk & st->dmp_en_msk))
3168 /* DMP may get turned off (may stay on due to batch) so
3169 * we update timings that may have changed while DMP
3174 nvi_period_src(st, st->hal->dev[snsr_id]->src);
3180 static int nvi_batch(void *client, int snsr_id, int flags,
3181 unsigned int period, unsigned int timeout)
3183 struct nvi_state *st = (struct nvi_state *)client;
3186 /* We use batch to set parameters in real time for one-shot sensors
3187 * (which normally don't use batch)
3189 if (SENSOR_FLAG_ONE_SHOT_MODE == (st->snsr[snsr_id].cfg.flags &
3190 REPORTING_MODE_MASK)) {
3191 st->snsr[snsr_id].cfg.delay_us_min = period;
3192 st->snsr[snsr_id].cfg.delay_us_max = timeout;
3193 st->snsr[snsr_id].cfg.report_n = flags;
3194 if (st->en_msk & (1 << DEV_DMP))
3195 ret = st->hal->dmp->fn_dev_init(st, snsr_id);
3199 if (timeout && !st->snsr[snsr_id].cfg.fifo_max_evnt_cnt)
3202 if (snsr_id == DEV_TMP)
3205 if (period == st->snsr[snsr_id].period_us &&
3206 timeout == st->snsr[snsr_id].timeout_us)
3209 st->snsr[snsr_id].period_us = period;
3210 st->snsr[snsr_id].timeout_us = timeout;
3211 if (!st->snsr[snsr_id].enable)
3214 ret = nvi_timeout(st);
3215 if (st->en_msk & (1 << DEV_DMP)) {
3216 if (st->hal->dmp->fn_dev_batch)
3217 /* batch can be done in real-time with the DMP on */
3219 ret = st->hal->dmp->fn_dev_batch(st, snsr_id, -1);
3223 ret |= nvi_period_src(st, st->hal->dev[snsr_id]->src);
3231 static int nvi_flush(void *client, int snsr_id)
3233 struct nvi_state *st = (struct nvi_state *)client;
3236 if (st->snsr[snsr_id].enable) {
3237 st->snsr[snsr_id].flush = true;
3238 ret = nvi_read(st, true);
3243 static int nvi_max_range(void *client, int snsr_id, int max_range)
3245 struct nvi_state *st = (struct nvi_state *)client;
3247 unsigned int i = max_range;
3250 if (snsr_id < 0 || snsr_id >= DEV_N)
3253 if (st->snsr[snsr_id].enable)
3254 /* can't change settings on the fly (disable device first) */
3257 if (i > st->hal->dev[snsr_id]->rr_0n)
3258 /* clamp to highest setting */
3259 i = st->hal->dev[snsr_id]->rr_0n;
3260 st->snsr[snsr_id].usr_cfg = i;
3261 st->snsr[snsr_id].cfg.resolution.ival =
3262 st->hal->dev[snsr_id]->rr[i].resolution.ival;
3263 st->snsr[snsr_id].cfg.resolution.fval =
3264 st->hal->dev[snsr_id]->rr[i].resolution.fval;
3265 st->snsr[snsr_id].cfg.max_range.ival =
3266 st->hal->dev[snsr_id]->rr[i].max_range.ival;
3267 st->snsr[snsr_id].cfg.max_range.fval =
3268 st->hal->dev[snsr_id]->rr[i].max_range.fval;
3269 st->snsr[snsr_id].cfg.offset.ival = st->hal->dev[snsr_id]->offset.ival;
3270 st->snsr[snsr_id].cfg.offset.fval = st->hal->dev[snsr_id]->offset.fval;
3271 st->snsr[snsr_id].cfg.scale.ival = st->hal->dev[snsr_id]->scale.ival;
3272 st->snsr[snsr_id].cfg.scale.fval = st->hal->dev[snsr_id]->scale.fval;
3273 /* AXIS sensors need the resolution placed in the scales */
3274 if (st->snsr[snsr_id].cfg.ch_n_max) {
3275 for (ch = 0; ch < st->snsr[snsr_id].cfg.ch_n_max; ch++) {
3276 st->snsr[snsr_id].cfg.scales[ch].ival =
3277 st->snsr[snsr_id].cfg.resolution.ival;
3278 st->snsr[snsr_id].cfg.scales[ch].fval =
3279 st->snsr[snsr_id].cfg.resolution.fval;
3283 if (st->en_msk & (1 << DEV_DMP))
3288 static int nvi_offset(void *client, int snsr_id, int channel, int offset)
3290 struct nvi_state *st = (struct nvi_state *)client;
3294 if (snsr_id >= DEV_AXIS_N || channel >= AXIS_N)
3297 old = st->dev_offset[snsr_id][channel];
3298 st->dev_offset[snsr_id][channel] = offset;
3299 if (st->en_msk & (1 << snsr_id)) {
3302 st->dev_offset[snsr_id][channel] = old;
3310 static int nvi_thresh_lo(void *client, int snsr_id, int thresh_lo)
3312 struct nvi_state *st = (struct nvi_state *)client;
3320 st->snsr[DEV_SM].cfg.thresh_lo = thresh_lo;
3321 if (st->en_msk & (1 << DEV_DMP))
3322 ret = st->hal->dmp->fn_dev_init(st, snsr_id);
3332 static int nvi_thresh_hi(void *client, int snsr_id, int thresh_hi)
3334 struct nvi_state *st = (struct nvi_state *)client;
3340 st->en_msk |= (1 << EN_LP);
3342 st->en_msk &= ~(1 << EN_LP);
3346 st->snsr[DEV_SM].cfg.thresh_hi = thresh_hi;
3347 if (st->en_msk & (1 << DEV_DMP))
3348 ret = st->hal->dmp->fn_dev_init(st, snsr_id);
3358 static int nvi_reset_dev(void *client, int snsr_id)
3360 struct nvi_state *st = (struct nvi_state *)client;
3363 ret = nvi_pm(st, __func__, NVI_PM_ON);
3364 ret |= nvi_wr_pm1(st, __func__, BIT_H_RESET);
3370 static int nvi_self_test(void *client, int snsr_id, char *buf)
3372 struct nvi_state *st = (struct nvi_state *)client;
3375 nvi_pm(st, __func__, NVI_PM_ON);
3376 nvi_aux_enable(st, __func__, false, false);
3377 nvi_user_ctrl_en(st, __func__, false, false, false, false);
3378 if (snsr_id == DEV_ACC)
3379 ret = st->hal->fn->st_acc(st);
3380 else if (snsr_id == DEV_GYR)
3381 ret = st->hal->fn->st_gyr(st);
3384 nvi_aux_enable(st, __func__, true, false);
3388 return snprintf(buf, PAGE_SIZE, "%d FAIL\n", ret);
3390 return snprintf(buf, PAGE_SIZE, "%d PASS\n", ret);
3393 static int nvi_regs(void *client, int snsr_id, char *buf)
3395 struct nvi_state *st = (struct nvi_state *)client;
3402 t = snprintf(buf, PAGE_SIZE, "registers: (only data != 0 shown)\n");
3403 for (j = 0; j < st->hal->reg_bank_n; j++) {
3404 t += snprintf(buf + t, PAGE_SIZE - t, "bank %u:\n", j);
3405 for (i = 0; i < st->hal->regs_n; i++) {
3406 if ((j == st->hal->reg->fifo_rw.bank) &&
3407 (i == st->hal->reg->fifo_rw.reg))
3410 ret = nvi_i2c_r(st, j, i, 1, &data);
3412 t += snprintf(buf + t, PAGE_SIZE - t,
3415 t += snprintf(buf + t, PAGE_SIZE - t,
3416 "0x%02x=0x%02x\n", i, data);
3422 static int nvi_nvs_write(void *client, int snsr_id, unsigned int nvs)
3424 struct nvi_state *st = (struct nvi_state *)client;
3426 switch (nvs & 0xFF) {
3429 case NVI_INFO_REG_WR:
3430 case NVI_INFO_MEM_RD:
3431 case NVI_INFO_MEM_WR:
3432 case NVI_INFO_DMP_FW:
3433 case NVI_INFO_DMP_EN_MSK:
3434 case NVI_INFO_FN_INIT:
3437 case NVI_INFO_DBG_SPEW:
3438 st->sts ^= NVI_DBG_SPEW_MSG;
3441 case NVI_INFO_AUX_SPEW:
3442 st->sts ^= NVI_DBG_SPEW_AUX;
3443 nvi_aux_dbg(st, "SNAPSHOT", 0);
3446 case NVI_INFO_FIFO_SPEW:
3447 st->sts ^= NVI_DBG_SPEW_FIFO;
3450 case NVI_INFO_TS_SPEW:
3451 st->sts ^= NVI_DBG_SPEW_TS;
3455 if (nvs < (NVI_INFO_SNSR_SPEW + DEV_N))
3456 st->sts ^= (NVI_DBG_SPEW_SNSR <<
3457 (nvs - NVI_INFO_SNSR_SPEW));
3466 static int nvi_nvs_read(void *client, int snsr_id, char *buf)
3468 struct nvi_state *st = (struct nvi_state *)client;
3477 st->info = NVI_INFO_VER;
3478 switch (info & 0xFF) {
3480 t = snprintf(buf, PAGE_SIZE, "NVI driver v. %u\n",
3481 NVI_DRIVER_VERSION);
3482 if (st->en_msk & (1 << FW_LOADED)) {
3483 t += snprintf(buf + t, PAGE_SIZE - t, "DMP FW v. %u\n",
3484 st->hal->dmp->fw_ver);
3485 t += snprintf(buf + t, PAGE_SIZE - t,
3487 !!(st->en_msk & (1 << DEV_DMP)));
3489 t += snprintf(buf + t, PAGE_SIZE - t, "standby_en=%x\n",
3490 !!(st->en_msk & (1 << EN_STDBY)));
3491 t += snprintf(buf + t, PAGE_SIZE - t, "bypass_timeout_ms=%u\n",
3492 st->bypass_timeout_ms);
3493 for (i = 0; i < DEV_N_AUX; i++) {
3494 if (st->snsr[i].push_delay_ns)
3495 t += snprintf(buf + t, PAGE_SIZE - t,
3496 "%s_push_delay_ns=%lld\n",
3497 st->snsr[i].cfg.name,
3498 st->snsr[i].push_delay_ns);
3501 for (i = 0; i < DEV_N_AUX; i++) {
3502 if ((st->dmp_dev_msk | MSK_DEV_MPU_AUX) & (1 << i)) {
3503 if (st->dmp_en_msk & (1 << i))
3504 t += snprintf(buf + t, PAGE_SIZE - t,
3506 st->snsr[i].cfg.name);
3508 t += snprintf(buf + t, PAGE_SIZE - t,
3510 st->snsr[i].cfg.name);
3517 t = snprintf(buf, PAGE_SIZE, "en_msk=%x\n", st->en_msk);
3518 t += snprintf(buf + t, PAGE_SIZE - t, "sts=%x\n", st->sts);
3519 t += snprintf(buf + t, PAGE_SIZE - t, "pm=%d\n", st->pm);
3520 t += snprintf(buf + t, PAGE_SIZE - t, "bm_timeout_us=%u\n",
3522 t += snprintf(buf + t, PAGE_SIZE - t, "fifo_src=%d\n",
3524 for (i = 0; i < DEV_N_AUX; i++) {
3525 t += snprintf(buf + t, PAGE_SIZE - t, "snsr[%u] %s:\n",
3526 i, st->snsr[i].cfg.name);
3527 t += snprintf(buf + t, PAGE_SIZE - t, "usr_cfg=%x\n",
3528 st->snsr[i].usr_cfg);
3529 t += snprintf(buf + t, PAGE_SIZE - t, "enable=%x\n",
3530 st->snsr[i].enable);
3531 t += snprintf(buf + t, PAGE_SIZE - t, "period_us=%u\n",
3532 st->snsr[i].period_us);
3533 t += snprintf(buf + t, PAGE_SIZE - t,
3535 st->snsr[i].timeout_us);
3536 t += snprintf(buf + t, PAGE_SIZE - t, "odr=%u\n",
3538 t += snprintf(buf + t, PAGE_SIZE - t, "ts_last=%lld\n",
3539 st->snsr[i].ts_last);
3540 t += snprintf(buf + t, PAGE_SIZE - t, "ts_reset=%x\n",
3541 st->snsr[i].ts_reset);
3542 t += snprintf(buf + t, PAGE_SIZE - t, "flush=%x\n",
3544 t += snprintf(buf + t, PAGE_SIZE - t, "matrix=%x\n",
3545 st->snsr[i].matrix);
3546 t += snprintf(buf + t, PAGE_SIZE - t, "buf_shft=%d\n",
3547 st->snsr[i].buf_shft);
3548 t += snprintf(buf + t, PAGE_SIZE - t, "buf_n=%u\n",
3554 st->hal->dmp->fn_clk_n(st, &n);
3555 t += snprintf(buf + t, PAGE_SIZE - t,
3556 "nvi_dmp_clk_n=%u\n", n);
3557 t += snprintf(buf + t, PAGE_SIZE - t,
3558 "st->dmp_clk_n=%u\n", st->dmp_clk_n);
3563 for (i = 0; i < SRC_N; i++) {
3564 if (i >= st->hal->src_n && i != SRC_DMP)
3567 t += snprintf(buf + t, PAGE_SIZE - t, "src[%u]:\n", i);
3568 t += snprintf(buf + t, PAGE_SIZE - t, "ts_reset=%x\n",
3569 st->src[i].ts_reset);
3570 t += snprintf(buf + t, PAGE_SIZE - t, "ts_end=%lld\n",
3572 t += snprintf(buf + t, PAGE_SIZE - t,
3574 st->src[i].ts_period);
3575 t += snprintf(buf + t, PAGE_SIZE - t,
3576 "period_us_src=%u\n",
3577 st->src[i].period_us_src);
3578 t += snprintf(buf + t, PAGE_SIZE - t,
3579 "period_us_req=%u\n",
3580 st->src[i].period_us_req);
3581 t += snprintf(buf + t, PAGE_SIZE - t,
3582 "period_us_min=%u\n",
3583 st->src[i].period_us_min);
3584 t += snprintf(buf + t, PAGE_SIZE - t,
3585 "period_us_max=%u\n",
3586 st->src[i].period_us_max);
3587 t += snprintf(buf + t, PAGE_SIZE - t,
3589 st->src[i].fifo_data_n);
3590 t += snprintf(buf + t, PAGE_SIZE - t, "base_t=%u\n",
3595 case NVI_INFO_DBG_SPEW:
3596 return snprintf(buf, PAGE_SIZE, "DBG spew=%x\n",
3597 !!(st->sts & NVI_DBG_SPEW_MSG));
3599 case NVI_INFO_AUX_SPEW:
3600 return snprintf(buf, PAGE_SIZE, "AUX spew=%x\n",
3601 !!(st->sts & NVI_DBG_SPEW_AUX));
3603 case NVI_INFO_FIFO_SPEW:
3604 return snprintf(buf, PAGE_SIZE, "FIFO spew=%x\n",
3605 !!(st->sts & NVI_DBG_SPEW_FIFO));
3607 case NVI_INFO_TS_SPEW:
3608 return snprintf(buf, PAGE_SIZE, "TS spew=%x\n",
3609 !!(st->sts & NVI_DBG_SPEW_TS));
3611 case NVI_INFO_REG_WR:
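/* info packs the write as bank[31:24] reg[23:16] data[15:8],
 * matching the b=/r=/d= fields reported below
 */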
3613 buf_rw[0] = (u8)(info >> 16);
3614 buf_rw[1] = (u8)(info >> 8);
3615 ret = nvi_i2c_write(st, info >> 24, 2, buf_rw);
3616 return snprintf(buf, PAGE_SIZE,
3617 "REG WR: b=%02x r=%02x d=%02x ERR=%d\n",
3618 info >> 24, buf_rw[0], buf_rw[1], ret);
3620 case NVI_INFO_MEM_RD:
3621 n = (info >> 8) & 0xFF;
3624 ret = nvi_mem_rd(st, info >> 16, n, buf_rw);
3626 return snprintf(buf, PAGE_SIZE,
3627 "MEM RD: ERR=%d\n", ret);
3629 t = snprintf(buf, PAGE_SIZE, "MEM RD:\n");
3630 for (i = 0; i < n; i++) {
3632 t += snprintf(buf + t, PAGE_SIZE - t, "%04x: ",
3634 t += snprintf(buf + t, PAGE_SIZE - t, "%02x ",
3637 t += snprintf(buf + t, PAGE_SIZE - t, "\n");
3639 t += snprintf(buf + t, PAGE_SIZE - t, "\n");
3642 case NVI_INFO_MEM_WR:
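/* info packs the DMP memory address in bits [31:16] and the
 * data byte in [15:8], matching the a=/d= fields reported below
 */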
3644 buf_rw[0] = (u8)(info >> 8);
3645 ret = nvi_mem_wr(st, info >> 16, 1, buf_rw, true);
3646 return snprintf(buf, PAGE_SIZE,
3647 "MEM WR: a=%04x d=%02x ERR=%d\n",
3648 info >> 16, buf_rw[0], ret);
3650 case NVI_INFO_DMP_FW:
3651 ret = nvi_dmp_fw(st);
3652 return snprintf(buf, PAGE_SIZE, "DMP FW: ERR=%d\n", ret);
3654 case NVI_INFO_DMP_EN_MSK:
3655 st->dmp_en_msk = (info >> 8) & MSK_DEV_ALL;
3656 return snprintf(buf, PAGE_SIZE, "st->dmp_en_msk=%x\n",
3659 case NVI_INFO_FN_INIT:
3660 if (st->hal->fn->init) {
3661 ret = st->hal->fn->init(st);
3662 return snprintf(buf, PAGE_SIZE,
3663 "hal->fn->init() ret=%d\n", ret);
3665 return snprintf(buf, PAGE_SIZE,
3666 "no hal->fn->init()\n");
3670 i = info - NVI_INFO_SNSR_SPEW;
3672 return snprintf(buf, PAGE_SIZE, "%s spew=%x\n",
3673 st->snsr[i].cfg.name,
3674 !!(st->sts & (NVI_DBG_SPEW_SNSR << i)));
3681 static struct nvs_fn_dev nvi_nvs_fn = {
3682 .enable = nvi_enable,
3685 .max_range = nvi_max_range,
3686 .offset = nvi_offset,
3687 .thresh_lo = nvi_thresh_lo,
3688 .thresh_hi = nvi_thresh_hi,
3689 .reset = nvi_reset_dev,
3690 .self_test = nvi_self_test,
3692 .nvs_write = nvi_nvs_write,
3693 .nvs_read = nvi_nvs_read,
3697 static int nvi_suspend(struct device *dev)
3699 struct i2c_client *client = to_i2c_client(dev);
3700 struct nvi_state *st = i2c_get_clientdata(client);
3704 s64 ts = 0; /* initialized to silence a compiler warning */
3706 if (st->sts & NVS_STS_SPEW_MSG)
3707 ts = nvs_timestamp();
3708 st->sts |= NVS_STS_SUSPEND;
3710 for (i = 0; i < DEV_N; i++)
3711 ret_t |= st->nvs->suspend(st->snsr[i].nvs_st);
3715 ret_t |= nvi_en(st);
3716 for (i = 0; i < DEV_N; i++) {
3717 if (st->snsr[i].enable && (st->snsr[i].cfg.flags &
3718 SENSOR_FLAG_WAKE_UP)) {
3719 ret = irq_set_irq_wake(st->i2c->irq, 1);
3721 st->irq_set_irq_wake = true;
3726 if (st->sts & NVS_STS_SPEW_MSG)
3727 dev_info(&client->dev,
3728 "%s WAKE_ON=%x elapsed_t=%lldns err=%d\n", __func__,
3729 st->irq_set_irq_wake, nvs_timestamp() - ts, ret_t);
3730 nvi_mutex_unlock(st);
3734 static int nvi_resume(struct device *dev)
3736 struct i2c_client *client = to_i2c_client(dev);
3737 struct nvi_state *st = i2c_get_clientdata(client);
3738 s64 ts = 0; /* initialized to silence a compiler warning */
3742 if (st->sts & NVS_STS_SPEW_MSG)
3743 ts = nvs_timestamp();
3745 if (st->irq_set_irq_wake) {
3746 /* determine whether this device was the wake source */
3747 ret = nvi_rd_int_status(st);
3749 dev_err(&client->dev, "%s IRQ STS ERR=%d\n",
3752 if (st->sts & NVS_STS_SPEW_MSG)
3753 dev_info(&client->dev,
3754 "%s IRQ STS=%#x DMP=%#x\n", __func__,
3755 st->rc.int_status, st->rc.int_dmp);
3756 if (st->rc.int_status & (1 << st->hal->bit->int_dmp)) {
3757 if (st->rc.int_dmp &
3758 (1 << st->hal->bit->dmp_int_sm))
3759 nvi_push_oneshot(st, DEV_SM);
3762 ret = irq_set_irq_wake(st->i2c->irq, 0);
3764 st->irq_set_irq_wake = false;
3766 nvi_mutex_unlock(st);
3769 for (i = 0; i < DEV_N; i++)
3770 ret |= st->nvs->resume(st->snsr[i].nvs_st);
3774 for (i = 0; i < AUX_PORT_MAX; i++) {
3775 if (st->aux.port[i].nmp.shutdown_bypass)
3778 if (i < AUX_PORT_MAX) {
3779 nvi_pm(st, __func__, NVI_PM_ON);
3780 nvi_aux_bypass_enable(st, false);
3782 st->sts &= ~NVS_STS_SUSPEND;
3785 if (st->sts & NVS_STS_SPEW_MSG)
3786 dev_info(&client->dev, "%s elapsed_t=%lldns err=%d\n",
3787 __func__, nvs_timestamp() - ts, ret);
3788 nvi_mutex_unlock(st);
3792 static const struct dev_pm_ops nvi_pm_ops = {
3793 .suspend = nvi_suspend,
3794 .resume = nvi_resume,
3797 static void nvi_shutdown(struct i2c_client *client)
3799 struct nvi_state *st = i2c_get_clientdata(client);
3802 st->sts |= NVS_STS_SHUTDOWN;
3804 for (i = 0; i < DEV_N; i++)
3805 st->nvs->shutdown(st->snsr[i].nvs_st);
3807 nvi_disable_irq(st);
3809 nvi_user_ctrl_en(st, __func__, false, false, false, false);
3810 nvi_pm(st, __func__, NVI_PM_OFF);
3812 if (st->sts & NVS_STS_SPEW_MSG)
3813 dev_info(&client->dev, "%s\n", __func__);
3816 static int nvi_remove(struct i2c_client *client)
3818 struct nvi_state *st = i2c_get_clientdata(client);
3822 nvi_shutdown(client);
3824 for (i = 0; i < DEV_N; i++)
3825 st->nvs->remove(st->snsr[i].nvs_st);
3829 dev_info(&client->dev, "%s\n", __func__);
3833 static struct nvi_id_hal nvi_id_hals[] = {
3834 { NVI_HW_ID_AUTO, NVI_NAME, &nvi_hal_6050 },
3835 { NVI_HW_ID_MPU6050, NVI_NAME_MPU6050, &nvi_hal_6050 },
3836 { NVI_HW_ID_MPU6500, NVI_NAME_MPU6500, &nvi_hal_6500 },
3837 { NVI_HW_ID_MPU6515, NVI_NAME_MPU6515, &nvi_hal_6515 },
3838 { NVI_HW_ID_MPU9150, NVI_NAME_MPU9150, &nvi_hal_6050 },
3839 { NVI_HW_ID_MPU9250, NVI_NAME_MPU9250, &nvi_hal_6500 },
3840 { NVI_HW_ID_MPU9350, NVI_NAME_MPU9350, &nvi_hal_6515 },
3841 { NVI_HW_ID_ICM20628, NVI_NAME_ICM20628, &nvi_hal_20628 },
3842 { NVI_HW_ID_ICM20630, NVI_NAME_ICM20630, &nvi_hal_20628 },
3843 { NVI_HW_ID_ICM20632, NVI_NAME_ICM20632, &nvi_hal_20628 },
3846 static int nvi_id2hal(struct nvi_state *st, u8 hw_id)
3850 for (i = 1; i < (int)ARRAY_SIZE(nvi_id_hals); i++) {
3851 if (nvi_id_hals[i].hw_id == hw_id) {
3852 st->hal = nvi_id_hals[i].hal;
3860 static int nvi_id_dev(struct nvi_state *st,
3861 const struct i2c_device_id *i2c_dev_id)
3863 u8 hw_id = NVI_HW_ID_AUTO;
3864 unsigned int i = i2c_dev_id->driver_data;
3869 BUG_ON(NVI_NDX_N != ARRAY_SIZE(nvi_i2c_device_id) - 1);
3870 BUG_ON(NVI_NDX_N != ARRAY_SIZE(nvi_id_hals));
3871 st->hal = nvi_id_hals[i].hal;
3872 if (i == NVI_NDX_AUTO) {
3873 nvi_pm_wr(st, __func__, 0, 0, 0);
3874 ret = nvi_i2c_rd(st, &st->hal->reg->who_am_i, &hw_id);
3876 dev_err(&st->i2c->dev, "%s AUTO ID FAILED\n",
3881 ret = nvi_id2hal(st, hw_id);
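/* if the WHO_AM_I value isn't recognized, assume an ICM part:
 * switch to the ICM HAL, power cycle via the regulators, and
 * try the ID read again
 */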
3883 st->hal = &nvi_hal_20628;
3884 /* cause a master reset by disabling regulators */
3885 nvs_vregs_disable(&st->i2c->dev, st->vreg,
3886 ARRAY_SIZE(nvi_vregs));
3887 ret = nvi_pm_wr(st, __func__, 0, 0, 0);
3888 ret = nvi_i2c_rd(st, &st->hal->reg->who_am_i, &hw_id);
3890 dev_err(&st->i2c->dev, "%s AUTO ID FAILED\n",
3895 ret = nvi_id2hal(st, hw_id);
3897 dev_err(&st->i2c->dev,
3898 "%s hw_id=%x AUTO ID FAILED\n",
3906 /* cause a master reset by disabling regulators */
3907 nvs_vregs_disable(&st->i2c->dev, st->vreg,
3908 ARRAY_SIZE(nvi_vregs));
3909 nvi_pm_wr(st, __func__, 0, 0, 0);
3912 /* populate the rest of st->snsr[dev].cfg */
3913 for (dev = 0; dev < DEV_N; dev++) {
3914 st->snsr[dev].cfg.part = nvi_id_hals[i].name;
3915 st->snsr[dev].cfg.version = st->hal->dev[dev]->version;
3916 st->snsr[dev].cfg.milliamp.ival =
3917 st->hal->dev[dev]->milliamp.ival;
3918 st->snsr[dev].cfg.milliamp.fval =
3919 st->hal->dev[dev]->milliamp.fval;
3922 ret = nvs_vregs_sts(st->vreg, ARRAY_SIZE(nvi_vregs));
3924 /* regulators aren't supported so manually do master reset */
3925 nvi_wr_pm1(st, __func__, BIT_H_RESET);
3926 for (i = 0; i < AXIS_N; i++) {
3927 st->rom_offset[DEV_ACC][i] = (s16)st->rc.accel_offset[i];
3928 st->rom_offset[DEV_GYR][i] = (s16)st->rc.gyro_offset[i];
3929 st->dev_offset[DEV_ACC][i] = 0;
3930 st->dev_offset[DEV_GYR][i] = 0;
3933 BUG_ON(SRC_N < st->hal->src_n);
3934 if (st->hal->fn->init)
3935 ret = st->hal->fn->init(st);
3938 /* set sensor period limits after st->hal->fn->init executes */
3939 for (dev = 0; dev < DEV_N; dev++) {
3940 src = st->hal->dev[dev]->src;
3944 if (SENSOR_FLAG_ONE_SHOT_MODE == (st->snsr[dev].cfg.flags &
3945 REPORTING_MODE_MASK))
3948 BUG_ON(src >= st->hal->src_n);
3949 st->snsr[dev].cfg.delay_us_min = st->src[src].period_us_min;
3950 st->snsr[dev].cfg.delay_us_max = st->src[src].period_us_max;
3953 if (hw_id == NVI_HW_ID_AUTO)
3954 dev_info(&st->i2c->dev, "%s: USING DEVICE TREE: %s\n",
3955 __func__, i2c_dev_id->name);
3957 dev_info(&st->i2c->dev, "%s: FOUND HW ID=%x USING: %s\n",
3958 __func__, hw_id, st->snsr[0].cfg.part);
3962 static struct sensor_cfg nvi_cfg_dflt[] = {
3964 .name = "accelerometer",
3970 .vendor = NVI_VENDOR,
3971 .float_significance = NVS_FLOAT_NANO,
3973 .thresh_hi = -1, /* LP */
3976 .name = "gyroscope",
3982 .vendor = NVI_VENDOR,
3986 .float_significance = NVS_FLOAT_NANO,
3990 .name = "gyro_temp",
3991 .snsr_id = SENSOR_TYPE_TEMPERATURE,
3994 .vendor = NVI_VENDOR,
3995 .flags = SENSOR_FLAG_ON_CHANGE_MODE,
3996 .float_significance = NVS_FLOAT_NANO,
3999 .name = "significant_motion",
4003 .vendor = NVI_VENDOR,
4005 /* delay_us_max is ignored by NVS since this is a one-shot
4006 * sensor so we use it as a third threshold parameter
4008 .delay_us_max = 200, /* SMD_DELAY2_THLD */
4009 .flags = SENSOR_FLAG_ONE_SHOT_MODE |
4010 SENSOR_FLAG_WAKE_UP,
4011 .thresh_lo = 1500, /* SMD_MOT_THLD */
4012 .thresh_hi = 600, /* SMD_DELAY_THLD */
4015 .name = "step_detector",
4019 .vendor = NVI_VENDOR,
4021 .flags = SENSOR_FLAG_ONE_SHOT_MODE,
4024 .name = "quaternion",
4025 .snsr_id = SENSOR_TYPE_ORIENTATION,
4029 .vendor = NVI_VENDOR,
4030 .delay_us_min = 10000,
4031 .delay_us_max = 255000,
4034 .name = "geomagnetic_rotation_vector",
4039 .vendor = NVI_VENDOR,
4040 .delay_us_min = 10000,
4041 .delay_us_max = 255000,
4044 .name = "gyroscope_uncalibrated",
4049 .vendor = NVI_VENDOR,
4053 .delay_us_min = 10000,
4054 .delay_us_max = 255000,
4055 .float_significance = NVS_FLOAT_NANO,
4060 /* device tree parameters parsed before the HAL is initialized */
4061 static int nvi_of_dt_pre(struct nvi_state *st, struct device_node *dn)
4067 for (i = 0; i < ARRAY_SIZE(nvi_cfg_dflt); i++)
4068 memcpy(&st->snsr[i].cfg, &nvi_cfg_dflt[i],
4069 sizeof(st->snsr[i].cfg));
4070 st->snsr[DEV_AUX].cfg.name = "auxiliary";
4071 st->en_msk = (1 << EN_STDBY);
4072 st->bypass_timeout_ms = NVI_BYPASS_TIMEOUT_MS;
4076 /* driver specific parameters */
4077 if (!of_property_read_u32(dn, "standby_en", &tmp)) {
4079 st->en_msk |= (1 << EN_STDBY);
4081 st->en_msk &= ~(1 << EN_STDBY);
4083 of_property_read_u32(dn, "bypass_timeout_ms", &st->bypass_timeout_ms);
4084 for (i = 0; i < DEV_N_AUX; i++) {
4085 snprintf(str, sizeof(str), "%s_push_delay_ns",
4086 st->snsr[i].cfg.name);
4087 if (!of_property_read_u32(dn, str, &tmp))
4088 st->snsr[i].push_delay_ns = (s64)tmp;
4094 /* device tree parameters parsed after the HAL is initialized */
4095 static void nvi_of_dt_post(struct nvi_state *st, struct device_node *dn)
4103 /* sensor specific parameters */
4104 for (i = 0; i < DEV_N; i++)
4105 nvs_of_dt(dn, &st->snsr[i].cfg, NULL);
4107 /* nvs_of_dt doesn't allow the REPORTING_MODE_MASK bits to change, so
4108 * we allow it here to control whether the batch sysfs nodes are
4109 * populated and can be used to configure significant motion parameters
4112 if (!of_property_read_u32(dn, "significant_motion_flags", &tmp)) {
4113 msk = SENSOR_FLAG_READONLY_MASK & ~REPORTING_MODE_MASK;
4115 st->snsr[DEV_SM].cfg.flags &= msk;
4116 st->snsr[DEV_SM].cfg.flags |= tmp;
4118 for (i = 0; i < DEV_N; i++) {
4120 for (j = 0; j < 9; j++)
4121 tmp |= st->snsr[i].cfg.matrix[j];
4123 /* sensor has a matrix */
4124 snprintf(str, sizeof(str), "%s_matrix_enable",
4125 st->snsr[i].cfg.name);
4126 if (!of_property_read_u32(dn, str, &tmp)) {
4127 /* matrix override */
4129 /* apply matrix within kernel */
4130 st->snsr[i].matrix = true;
4132 /* the HAL/fusion layer will handle the matrix */
4133 st->snsr[i].matrix = false;
4138 /* sensor overrides that enable the DMP.
4139 * If the sensor is specific to the DMP and this override is
4140 * disabled, then the virtual sensor is removed.
4143 st->dmp_dev_msk = st->hal->dmp->dev_msk;
4144 st->dmp_en_msk = st->hal->dmp->en_msk;
4145 for (i = 0; i < DEV_N_AUX; i++) {
4146 snprintf(str, sizeof(str), "%s_dmp_en",
4147 st->snsr[i].cfg.name);
4148 if (!of_property_read_u32(dn, str, &tmp)) {
4151 if (MSK_DEV_DMP & msk)
4152 st->dmp_dev_msk |= msk;
4153 st->dmp_en_msk |= msk;
4156 if (MSK_DEV_DMP & (1 << i))
4157 st->dmp_dev_msk &= msk;
4158 st->dmp_en_msk &= msk;
4165 static int nvi_init(struct nvi_state *st,
4166 const struct i2c_device_id *i2c_dev_id)
4168 struct mpu_platform_data *pdata;
4169 signed char matrix[9];
4174 nvi_of_dt_pre(st, st->i2c->dev.of_node);
4176 ret = nvi_id_dev(st, i2c_dev_id);
4180 if (st->i2c->dev.of_node) {
4181 nvi_of_dt_post(st, st->i2c->dev.of_node);
4183 pdata = dev_get_platdata(&st->i2c->dev);
4185 memcpy(&st->snsr[DEV_ACC].cfg.matrix,
4186 &pdata->orientation,
4187 sizeof(st->snsr[DEV_ACC].cfg.matrix));
4188 memcpy(&st->snsr[DEV_GYR].cfg.matrix,
4189 &pdata->orientation,
4190 sizeof(st->snsr[DEV_GYR].cfg.matrix));
4192 dev_err(&st->i2c->dev, "%s dev_get_platdata ERR\n",
4198 if (st->en_msk & (1 << FW_LOADED))
4201 ret = nvi_dmp_fw(st);
4203 /* remove DMP dependent sensors */
4206 dev_info(&st->i2c->dev, "%s DMP FW loaded\n", __func__);
4207 /* remove DMP dependent sensors not supported by this DMP */
4208 n = MSK_DEV_DMP ^ st->dmp_dev_msk;
4211 for (i = 0; i < DEV_N; i++) {
4213 st->snsr[i].cfg.snsr_id = -1;
4217 nvi_nvs_fn.sts = &st->sts;
4218 nvi_nvs_fn.errs = &st->errs;
4219 st->nvs = nvs_iio();
4220 if (st->nvs == NULL)
4224 for (i = 0; i < DEV_N; i++) {
4225 if (st->snsr[i].matrix) {
4226 /* matrix is handled in the kernel so remove it from NVS */
4227 memcpy(matrix, st->snsr[i].cfg.matrix, sizeof(matrix));
4228 memset(st->snsr[i].cfg.matrix, 0,
4229 sizeof(st->snsr[i].cfg.matrix));
4231 ret = st->nvs->probe(&st->snsr[i].nvs_st, st, &st->i2c->dev,
4232 &nvi_nvs_fn, &st->snsr[i].cfg);
4234 st->snsr[i].cfg.snsr_id = i;
4235 if (st->snsr[i].matrix)
4236 memcpy(st->snsr[i].cfg.matrix, matrix,
4237 sizeof(st->snsr[i].cfg.matrix));
4238 nvi_max_range(st, i, st->snsr[i].cfg.max_range.ival);
4245 /* restore SENSOR_FLAG_ONE_SHOT_MODE for significant motion in case it
4246 * was cleared to allow realtime calibration with batch.
4248 st->snsr[DEV_SM].cfg.flags &= ~REPORTING_MODE_MASK;
4249 st->snsr[DEV_SM].cfg.flags |= SENSOR_FLAG_ONE_SHOT_MODE;
4250 ret = request_threaded_irq(st->i2c->irq, nvi_handler, nvi_thread,
4251 IRQF_TRIGGER_RISING, NVI_NAME, st);
4253 dev_err(&st->i2c->dev, "%s req_threaded_irq ERR %d\n",
4258 nvi_pm(st, __func__, NVI_PM_AUTO);
4259 nvi_rc_clr(st, __func__);
4260 st->rc_dis = false; /* enable register cache after initialization */
4261 nvi_state_local = st;
4265 static void nvi_dmp_fw_load_worker(struct work_struct *work)
4267 struct nvi_pdata *pd = container_of(work, struct nvi_pdata,
4269 struct nvi_state *st = &pd->st;
4272 ret = nvi_init(st, pd->i2c_dev_id);
4274 dev_err(&st->i2c->dev, "%s ERR %d\n", __func__, ret);
4275 nvi_remove(st->i2c);
4277 dev_info(&st->i2c->dev, "%s done\n", __func__);
4280 static int nvi_probe(struct i2c_client *client,
4281 const struct i2c_device_id *i2c_dev_id)
4283 struct nvi_pdata *pd;
4284 struct nvi_state *st;
4287 dev_info(&client->dev, "%s %s\n", __func__, i2c_dev_id->name);
4289 dev_err(&client->dev, "%s ERR: no interrupt\n", __func__);
4293 /* just test for a global disable in the device tree */
4294 ret = nvs_of_dt(client->dev.of_node, NULL, NULL);
4295 if (ret == -ENODEV) {
4296 dev_info(&client->dev, "%s DT disabled\n", __func__);
4300 pd = devm_kzalloc(&client->dev, sizeof(*pd), GFP_KERNEL);
4305 i2c_set_clientdata(client, pd);
4306 st->rc_dis = true; /* disable register cache during initialization */
4308 pd->i2c_dev_id = i2c_dev_id;
4309 /* init the FW load work item */
4310 INIT_WORK(&pd->fw_load_work, nvi_dmp_fw_load_worker);
4311 schedule_work(&pd->fw_load_work);
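/* the rest of the initialization, including the DMP firmware
 * load, completes asynchronously in nvi_dmp_fw_load_worker
 */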
4315 MODULE_DEVICE_TABLE(i2c, nvi_i2c_device_id);
4317 static const struct of_device_id nvi_of_match[] = {
4318 { .compatible = "invensense,mpu6xxx", },
4319 { .compatible = "invensense,mpu6050", },
4320 { .compatible = "invensense,mpu6500", },
4321 { .compatible = "invensense,mpu6515", },
4322 { .compatible = "invensense,mpu9150", },
4323 { .compatible = "invensense,mpu9250", },
4324 { .compatible = "invensense,mpu9350", },
4325 { .compatible = "invensense,icm20628", },
4326 { .compatible = "invensense,icm20630", },
4327 { .compatible = "invensense,icm20632", },
4331 MODULE_DEVICE_TABLE(of, nvi_of_match);
4333 static struct i2c_driver nvi_i2c_driver = {
4334 .class = I2C_CLASS_HWMON,
4336 .remove = nvi_remove,
4337 .shutdown = nvi_shutdown,
4340 .owner = THIS_MODULE,
4341 .of_match_table = of_match_ptr(nvi_of_match),
4344 .id_table = nvi_i2c_device_id,
4347 module_i2c_driver(nvi_i2c_driver);
4349 MODULE_LICENSE("GPL");
4350 MODULE_DESCRIPTION("NVidia Invensense driver");
4351 MODULE_AUTHOR("NVIDIA Corporation");