+ /*
+  * NOTE(review): this hunk is the middle of a larger function (the opening
+  * "if" and the local declarations for ret/us/mode/i/scale_i/t_sb/period_us
+  * are above this hunk; the closing braces are below it).
+  */
+ /* Direct-access path: borrow the I2C bus from the MPU before writing. */
+ ret = bmp_nvi_mpu_bypass_request(st);
+ if (!ret) {
+ /* Program standby time; on write failure fall back to SLEEP mode. */
+ ret = bmp_wr(st, BMP280_REG_CONFIG, t_sb);
+ if (ret)
+ mode = BMP_REG_CTRL_MODE_SLEEP;
+ if (st->sts & NVS_STS_SPEW_MSG)
+ dev_info(&st->i2c->dev, "%s cfg=%x err=%d\n",
+ __func__, t_sb, ret);
+ /* Accumulate any mode-write error into ret, then release the bus. */
+ ret |= bmp_mode_wr(st, mode);
+ bmp_nvi_mpu_bypass_release(st);
+ }
+ } else {
+ /* Oversampling scale: user-forced (1-based index) or automatic. */
+ if (scale_user) {
+ scale_i = scale_user - 1;
+ us = st->hal->dev[i].scale[scale_i].delay_ms * 1000;
+ } else {
+ /* scale is automatic based on period */
+ /*
+  * Walk the scale table until the HW conversion delay fits within
+  * the requested period.  NOTE(review): assumes scale[] is ordered
+  * from slowest (largest delay_ms) to fastest — confirm in the HAL
+  * table definition.
+  */
+ scale_i = 0;
+ for (; scale_i < st->hal->dev[i].scale_n; scale_i++) {
+ us = st->hal->dev[i].scale[scale_i].delay_ms;
+ us *= 1000;
+ if (period_us >= us)
+ /* HW (us) is fast enough */
+ break;
+ }
+
+ /* No scale fit the period: clamp to the last (fastest) entry.
+  * NOTE(review): if scale_n were 0, us would be left unset here —
+  * presumably the tables are never empty; verify.
+  */
+ if (scale_i >= st->hal->dev[i].scale_n)
+ scale_i = st->hal->dev[i].scale_n - 1;
+ }
+
+ /* Never poll faster than the HW conversion time. */
+ if (period_us < us)
+ period_us = us;
+#if BMP_NVI_MPU_SUPPORT
+ /* Scale changed while the MPU owns the write port: tell it the new
+  * per-conversion delay so its I2C master paces accordingly.
+  */
+ if (scale_i != st->scale_i && st->mpu_en &&
+ st->port_id[WR] >= 0)
+ ret = nvi_mpu_delay_ms(st->port_id[WR],
+ st->hal->dev[i].scale[scale_i].delay_ms);
+#endif /* BMP_NVI_MPU_SUPPORT */
+ /* Build the measurement-control register value per chip family. */
+ if (st->dev_id == BMP_REG_ID_BMP180) {
+ /* BMP180: oversampling in the OSS field + start-temperature cmd. */
+ mode = st->hal->dev[i].scale[scale_i].os <<
+ BMP180_REG_CTRL_OSS;
+ mode |= BMP180_REG_CTRL_MODE_TEMP;
+ } else {
+ /* BMP280: forced one-shot mode; temperature oversampling always,
+  * pressure oversampling only when the pressure device is enabled.
+  */
+ mode = BMP280_REG_CTRL_MODE_FORCED1;
+ mode |= st->hal->dev[BMP_DEV_TMP].scale[scale_i].os <<
+ BMP280_REG_CTRL_OSRS_T;
+ if (enable & (1 << BMP_DEV_PRS))
+ mode |= st->hal->dev[i].scale[scale_i].os <<
+ BMP280_REG_CTRL_OSRS_P;
+ }
+ ret |= bmp_mode_wr(st, mode);
+ }
+ /* Commit the new period/scale only if everything above succeeded. */
+ if (!ret) {
+ if (period_us != st->poll_delay_us) {
+ if (st->sts & NVS_STS_SPEW_MSG)
+ dev_info(&st->i2c->dev, "%s: period_us=%u\n",
+ __func__, period_us);
+#if BMP_NVI_MPU_SUPPORT
+ if (st->mpu_en) {
+ /* Batch timeout = smallest timeout among enabled devices.
+  * NOTE(review): -1 relies on us being unsigned (UINT_MAX
+  * sentinel for the minimum search) — confirm the declaration.
+  */
+ us = -1;
+ for (i = 0; i < BMP_DEV_N; i++) {
+ if ((enable & (1 << i)) &&
+ st->timeout_us[i] < us)
+ us = st->timeout_us[i];
+ }
+ ret = nvi_mpu_batch(st->port_id[RD],
+ period_us, us);
+ }
+ if (!ret)
+#endif /* BMP_NVI_MPU_SUPPORT */
+ st->poll_delay_us = period_us;
+ }
+ if (!ret)
+ st->scale_i = scale_i;