]> rtime.felk.cvut.cz Git - zynq/linux.git/commitdiff
Merge tag 'iomap-5.3-merge-4' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 19 Jul 2019 18:38:12 +0000 (11:38 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 19 Jul 2019 18:38:12 +0000 (11:38 -0700)
Pull iomap split/cleanup from Darrick Wong:
 "As promised, here's the second part of the iomap merge for 5.3, in
  which we break up iomap.c into smaller files grouped by functional
  area so that it'll be easier in the long run to maintain cohesiveness
  of code units and to review incoming patches. There are no functional
  changes and fs/iomap.c split cleanly.

  Summary:

   - Regroup the fs/iomap.c code by major functional area so that we can
     start development for 5.4 from a more stable base"

* tag 'iomap-5.3-merge-4' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  iomap: move internal declarations into fs/iomap/
  iomap: move the main iteration code into a separate file
  iomap: move the buffered IO code into a separate file
  iomap: move the direct IO code into a separate file
  iomap: move the SEEK_HOLE code into a separate file
  iomap: move the file mapping reporting code into a separate file
  iomap: move the swapfile code into a separate file
  iomap: start moving code to fs/iomap/

1  2 
MAINTAINERS
fs/dax.c
fs/internal.h
fs/iomap/buffered-io.c

diff --combined MAINTAINERS
index 2d766db67bb082f50a04ab55d4f3125c3c1beffe,1086ac7b0f0525c98abe43a1b47fcf87fb45fa49..a99adee5471a1a7a1e2041740c3f0b0165a55796
@@@ -1155,7 -1155,7 +1155,7 @@@ APPLIED MICRO (APM) X-GENE SOC PM
  M:    Khuong Dinh <khuong@os.amperecomputing.com>
  S:    Supported
  F:    drivers/perf/xgene_pmu.c
 -F:    Documentation/perf/xgene-pmu.txt
 +F:    Documentation/admin-guide/perf/xgene-pmu.rst
  F:    Documentation/devicetree/bindings/perf/apm-xgene-pmu.txt
  
  APTINA CAMERA SENSOR PLL
@@@ -2218,7 -2218,7 +2218,7 @@@ F:      drivers/*/*s3c64xx
  F:    drivers/*/*s5pv210*
  F:    drivers/memory/samsung/*
  F:    drivers/soc/samsung/*
 -F:    Documentation/arm/Samsung/
 +F:    Documentation/arm/samsung/
  F:    Documentation/devicetree/bindings/arm/samsung/
  F:    Documentation/devicetree/bindings/sram/samsung-sram.txt
  F:    Documentation/devicetree/bindings/power/pd-samsung.txt
@@@ -2689,7 -2689,7 +2689,7 @@@ ATA OVER ETHERNET (AOE) DRIVE
  M:    "Justin Sanders" <justin@coraid.com>
  W:    http://www.openaoe.org/
  S:    Supported
 -F:    Documentation/aoe/
 +F:    Documentation/admin-guide/aoe/
  F:    drivers/block/aoe/
  
  ATHEROS 71XX/9XXX GPIO DRIVER
@@@ -2968,7 -2968,7 +2968,7 @@@ M:      Jens Axboe <axboe@kernel.dk
  L:    linux-block@vger.kernel.org
  S:    Maintained
  F:    block/bfq-*
 -F:    Documentation/block/bfq-iosched.txt
 +F:    Documentation/block/bfq-iosched.rst
  
  BFS FILE SYSTEM
  M:    "Tigran A. Aivazian" <aivazian.tigran@gmail.com>
@@@ -3108,9 -3108,9 +3108,9 @@@ S:      Maintaine
  F:    arch/riscv/net/
  
  BPF JIT for S390
 +M:    Ilya Leoshkevich <iii@linux.ibm.com>
  M:    Heiko Carstens <heiko.carstens@de.ibm.com>
  M:    Vasily Gorbik <gor@linux.ibm.com>
 -M:    Christian Borntraeger <borntraeger@de.ibm.com>
  L:    netdev@vger.kernel.org
  L:    bpf@vger.kernel.org
  S:    Maintained
@@@ -3765,7 -3765,7 +3765,7 @@@ F:      arch/powerpc/platforms/cell
  
  CEPH COMMON CODE (LIBCEPH)
  M:    Ilya Dryomov <idryomov@gmail.com>
 -M:    "Yan, Zheng" <zyan@redhat.com>
 +M:    Jeff Layton <jlayton@kernel.org>
  M:    Sage Weil <sage@redhat.com>
  L:    ceph-devel@vger.kernel.org
  W:    http://ceph.com/
@@@ -3777,7 -3777,7 +3777,7 @@@ F:      include/linux/ceph
  F:    include/linux/crush/
  
  CEPH DISTRIBUTED FILE SYSTEM CLIENT (CEPH)
 -M:    "Yan, Zheng" <zyan@redhat.com>
 +M:    Jeff Layton <jlayton@kernel.org>
  M:    Sage Weil <sage@redhat.com>
  M:    Ilya Dryomov <idryomov@gmail.com>
  L:    ceph-devel@vger.kernel.org
@@@ -4158,7 -4158,7 +4158,7 @@@ L:      cgroups@vger.kernel.or
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
  S:    Maintained
  F:    Documentation/admin-guide/cgroup-v2.rst
 -F:    Documentation/cgroup-v1/
 +F:    Documentation/admin-guide/cgroup-v1/
  F:    include/linux/cgroup*
  F:    kernel/cgroup/
  
@@@ -4169,7 -4169,7 +4169,7 @@@ W:      http://www.bullopensource.org/cpuset
  W:    http://oss.sgi.com/projects/cpusets/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
  S:    Maintained
 -F:    Documentation/cgroup-v1/cpusets.rst
 +F:    Documentation/admin-guide/cgroup-v1/cpusets.rst
  F:    include/linux/cpuset.h
  F:    kernel/cgroup/cpuset.c
  
@@@ -4183,19 -4183,6 +4183,19 @@@ S:    Maintaine
  F:    mm/memcontrol.c
  F:    mm/swap_cgroup.c
  
 +CONTROL GROUP - BLOCK IO CONTROLLER (BLKIO)
 +M:    Tejun Heo <tj@kernel.org>
 +M:    Jens Axboe <axboe@kernel.dk>
 +L:    cgroups@vger.kernel.org
 +L:    linux-block@vger.kernel.org
 +T:    git git://git.kernel.dk/linux-block
 +F:    Documentation/cgroup-v1/blkio-controller.rst
 +F:    block/blk-cgroup.c
 +F:    include/linux/blk-cgroup.h
 +F:    block/blk-throttle.c
 +F:    block/blk-iolatency.c
 +F:    block/bfq-cgroup.c
 +
  CORETEMP HARDWARE MONITORING DRIVER
  M:    Fenghua Yu <fenghua.yu@intel.com>
  L:    linux-hwmon@vger.kernel.org
@@@ -4655,7 -4642,7 +4655,7 @@@ DELL SYSTEMS MANAGEMENT BASE DRIVER (dc
  M:    Stuart Hayes <stuart.w.hayes@gmail.com>
  L:    platform-driver-x86@vger.kernel.org
  S:    Maintained
 -F:    Documentation/dcdbas.txt
 +F:    Documentation/driver-api/dcdbas.rst
  F:    drivers/platform/x86/dcdbas.*
  
  DELL WMI NOTIFICATIONS DRIVER
@@@ -4683,13 -4670,6 +4683,13 @@@ L:    linux-mtd@lists.infradead.or
  S:    Supported
  F:    drivers/mtd/nand/raw/denali*
  
 +DESIGNWARE EDMA CORE IP DRIVER
 +M:    Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 +L:    dmaengine@vger.kernel.org
 +S:    Maintained
 +F:    drivers/dma/dw-edma/
 +F:    include/linux/dma/edma.h
 +
  DESIGNWARE USB2 DRD IP DRIVER
  M:    Minas Harutyunyan <hminas@synopsys.com>
  L:    linux-usb@vger.kernel.org
@@@ -4755,7 -4735,7 +4755,7 @@@ Q:      http://patchwork.kernel.org/project/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm.git
  T:    quilt http://people.redhat.com/agk/patches/linux/editing/
  S:    Maintained
 -F:    Documentation/device-mapper/
 +F:    Documentation/admin-guide/device-mapper/
  F:    drivers/md/Makefile
  F:    drivers/md/Kconfig
  F:    drivers/md/dm*
@@@ -5026,7 -5006,7 +5026,7 @@@ T:      git git://git.linbit.com/drbd-8.4.gi
  S:    Supported
  F:    drivers/block/drbd/
  F:    lib/lru_cache.c
 -F:    Documentation/blockdev/drbd/
 +F:    Documentation/admin-guide/blockdev/
  
  DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
  M:    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@@ -5219,13 -5199,6 +5219,13 @@@ S:    Maintaine
  F:    drivers/gpu/drm/tinydrm/st7735r.c
  F:    Documentation/devicetree/bindings/display/sitronix,st7735r.txt
  
 +DRM DRIVER FOR ST-ERICSSON MCDE
 +M:    Linus Walleij <linus.walleij@linaro.org>
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
 +S:    Maintained
 +F:    drivers/gpu/drm/mcde/
 +F:    Documentation/devicetree/bindings/display/ste,mcde.txt
 +
  DRM DRIVER FOR TDFX VIDEO CARDS
  S:    Orphan / Obsolete
  F:    drivers/gpu/drm/tdfx/
@@@ -5511,7 -5484,6 +5511,7 @@@ T:      git git://anongit.freedesktop.org/dr
  
  DRM PANEL DRIVERS
  M:    Thierry Reding <thierry.reding@gmail.com>
 +R:    Sam Ravnborg <sam@ravnborg.org>
  L:    dri-devel@lists.freedesktop.org
  T:    git git://anongit.freedesktop.org/drm/drm-misc
  S:    Maintained
@@@ -5540,6 -5512,7 +5540,6 @@@ F:      Documentation/gpu/xen-front.rs
  DRM TTM SUBSYSTEM
  M:    Christian Koenig <christian.koenig@amd.com>
  M:    Huang Rui <ray.huang@amd.com>
 -M:    Junwei Zhang <Jerry.Zhang@amd.com>
  T:    git git://people.freedesktop.org/~agd5f/linux
  S:    Maintained
  L:    dri-devel@lists.freedesktop.org
@@@ -6107,7 -6080,7 +6107,7 @@@ M:      Ard Biesheuvel <ard.biesheuvel@linar
  L:    linux-efi@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
  S:    Maintained
 -F:    Documentation/efi-stub.txt
 +F:    Documentation/admin-guide/efi-stub.rst
  F:    arch/*/kernel/efi.c
  F:    arch/x86/boot/compressed/eboot.[ch]
  F:    arch/*/include/asm/efi.h
@@@ -6321,8 -6294,9 +6321,8 @@@ F:      Documentation/devicetree/bindings/co
  F:    drivers/counter/ftm-quaddec.c
  
  FLOPPY DRIVER
 -M:    Jiri Kosina <jikos@kernel.org>
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git
 -S:    Odd fixes
 +S:    Orphan
 +L:    linux-block@vger.kernel.org
  F:    drivers/block/floppy.c
  
  FMC SUBSYSTEM
@@@ -6567,7 -6541,7 +6567,7 @@@ M:      "Rafael J. Wysocki" <rjw@rjwysocki.n
  M:    Pavel Machek <pavel@ucw.cz>
  L:    linux-pm@vger.kernel.org
  S:    Supported
 -F:    Documentation/power/freezing-of-tasks.txt
 +F:    Documentation/power/freezing-of-tasks.rst
  F:    include/linux/freezer.h
  F:    kernel/freezer.c
  
@@@ -6681,7 -6655,7 +6681,7 @@@ S:      Maintaine
  F:    scripts/gcc-plugins/
  F:    scripts/gcc-plugin.sh
  F:    scripts/Makefile.gcc-plugins
 -F:    Documentation/gcc-plugins.txt
 +F:    Documentation/core-api/gcc-plugins.rst
  
  GASKET DRIVER FRAMEWORK
  M:    Rob Springer <rspringer@google.com>
@@@ -6893,7 -6867,7 +6893,7 @@@ T:      git git://git.kernel.org/pub/scm/lin
  S:    Maintained
  F:    Documentation/devicetree/bindings/gpio/
  F:    Documentation/driver-api/gpio/
 -F:    Documentation/gpio/
 +F:    Documentation/admin-guide/gpio/
  F:    Documentation/ABI/testing/gpio-cdev
  F:    Documentation/ABI/obsolete/sysfs-gpio
  F:    drivers/gpio/
@@@ -7114,7 -7088,7 +7114,7 @@@ M:      Herbert Xu <herbert@gondor.apana.org
  L:    linux-crypto@vger.kernel.org
  S:    Odd fixes
  F:    Documentation/devicetree/bindings/rng/
 -F:    Documentation/hw_random.txt
 +F:    Documentation/admin-guide/hw_random.rst
  F:    drivers/char/hw_random/
  F:    include/linux/hw_random.h
  
@@@ -7288,7 -7262,7 +7288,7 @@@ M:      Shaokun Zhang <zhangshaokun@hisilico
  W:    http://www.hisilicon.com
  S:    Supported
  F:    drivers/perf/hisilicon
 -F:    Documentation/perf/hisi-pmu.txt
 +F:    Documentation/admin-guide/perf/hisi-pmu.rst
  
  HISILICON ROCE DRIVER
  M:    Lijun Ou <oulijun@huawei.com>
@@@ -7961,33 -7935,6 +7961,33 @@@ L:    linux-mtd@lists.infradead.or
  S:    Maintained
  F:    drivers/mtd/nand/raw/ingenic/
  
 +INGENIC JZ47xx SoCs
 +M:    Paul Cercueil <paul@crapouillou.net>
 +S:    Maintained
 +F:    arch/mips/boot/dts/ingenic/
 +F:    arch/mips/include/asm/mach-jz4740/
 +F:    arch/mips/jz4740/
 +F:    drivers/clk/ingenic/
 +F:    drivers/dma/dma-jz4780.c
 +F:    drivers/gpu/drm/ingenic/
 +F:    drivers/i2c/busses/i2c-jz4780.c
 +F:    drivers/iio/adc/ingenic-adc.c
 +F:    drivers/irqchip/irq-ingenic.c
 +F:    drivers/memory/jz4780-nemc.c
 +F:    drivers/mmc/host/jz4740_mmc.c
 +F:    drivers/mtd/nand/raw/ingenic/
 +F:    drivers/pinctrl/pinctrl-ingenic.c
 +F:    drivers/power/supply/ingenic-battery.c
 +F:    drivers/pwm/pwm-jz4740.c
 +F:    drivers/rtc/rtc-jz4740.c
 +F:    drivers/tty/serial/8250/8250_ingenic.c
 +F:    drivers/usb/musb/jz4740.c
 +F:    drivers/watchdog/jz4740_wdt.c
 +F:    include/dt-bindings/iio/adc/ingenic,adc.h
 +F:    include/linux/mfd/ingenic-tcu.h
 +F:    sound/soc/jz4740/
 +F:    sound/soc/codecs/jz47*
 +
  INOTIFY
  M:    Jan Kara <jack@suse.cz>
  R:    Amir Goldstein <amir73il@gmail.com>
@@@ -8268,7 -8215,7 +8268,7 @@@ T:      git git://git.kernel.org/pub/scm/lin
  F:    drivers/gpio/gpio-*cove.c
  F:    drivers/gpio/gpio-msic.c
  
 -INTEL MULTIFUNCTION PMIC DEVICE DRIVERS
 +INTEL PMIC MULTIFUNCTION DEVICE DRIVERS
  R:    Andy Shevchenko <andriy.shevchenko@linux.intel.com>
  S:    Maintained
  F:    drivers/mfd/intel_msic.c
@@@ -8365,7 -8312,7 +8365,7 @@@ L:      tboot-devel@lists.sourceforge.ne
  W:    http://tboot.sourceforge.net
  T:    hg http://tboot.hg.sourceforge.net:8000/hgroot/tboot/tboot
  S:    Supported
 -F:    Documentation/intel_txt.txt
 +F:    Documentation/x86/intel_txt.rst
  F:    include/linux/tboot.h
  F:    arch/x86/kernel/tboot.c
  
@@@ -8379,7 -8326,7 +8379,7 @@@ INTERCONNECT AP
  M:    Georgi Djakov <georgi.djakov@linaro.org>
  L:    linux-pm@vger.kernel.org
  S:    Maintained
 -F:    Documentation/interconnect/
 +F:    Documentation/driver-api/interconnect.rst
  F:    Documentation/devicetree/bindings/interconnect/
  F:    drivers/interconnect/
  F:    include/dt-bindings/interconnect/
@@@ -8415,6 -8362,7 +8415,7 @@@ L:      linux-fsdevel@vger.kernel.or
  T:    git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
  S:    Supported
  F:    fs/iomap.c
+ F:    fs/iomap/
  F:    include/linux/iomap.h
  
  IOMMU DRIVERS
@@@ -8515,7 -8463,7 +8516,7 @@@ F:      drivers/irqchip
  ISA
  M:    William Breathitt Gray <vilhelm.gray@gmail.com>
  S:    Maintained
 -F:    Documentation/isa.txt
 +F:    Documentation/driver-api/isa.rst
  F:    drivers/base/isa.c
  F:    include/linux/isa.h
  
@@@ -8530,7 -8478,7 +8531,7 @@@ F:      drivers/media/radio/radio-isa
  ISAPNP
  M:    Jaroslav Kysela <perex@perex.cz>
  S:    Maintained
 -F:    Documentation/isapnp.txt
 +F:    Documentation/driver-api/isapnp.rst
  F:    drivers/pnp/isapnp/
  F:    include/linux/isapnp.h
  
@@@ -8728,7 -8676,7 +8729,7 @@@ R:      Vivek Goyal <vgoyal@redhat.com
  L:    kexec@lists.infradead.org
  W:    http://lse.sourceforge.net/kdump/
  S:    Maintained
 -F:    Documentation/kdump/
 +F:    Documentation/admin-guide/kdump/
  
  KEENE FM RADIO TRANSMITTER DRIVER
  M:    Hans Verkuil <hverkuil@xs4all.nl>
@@@ -9082,7 -9030,7 +9083,7 @@@ M:      Matan Ziv-Av <matan@svgalib.org
  L:    platform-driver-x86@vger.kernel.org
  S:    Maintained
  F:    Documentation/ABI/testing/sysfs-platform-lg-laptop
 -F:    Documentation/laptops/lg-laptop.rst
 +F:    Documentation/admin-guide/laptops/lg-laptop.rst
  F:    drivers/platform/x86/lg-laptop.c
  
  LG2160 MEDIA DRIVER
@@@ -9451,7 -9399,7 +9452,7 @@@ M:      "Richard Russon (FlatCap)" <ldm@flat
  L:    linux-ntfs-dev@lists.sourceforge.net
  W:    http://www.linux-ntfs.org/content/view/19/37/
  S:    Maintained
 -F:    Documentation/ldm.txt
 +F:    Documentation/admin-guide/ldm.rst
  F:    block/partitions/ldm.*
  
  LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
@@@ -10169,13 -10117,6 +10170,13 @@@ L: linux-wireless@vger.kernel.or
  S:    Maintained
  F:    drivers/net/wireless/mediatek/mt7601u/
  
 +MEDIATEK MT7621/28/88 I2C DRIVER
 +M:    Stefan Roese <sr@denx.de>
 +L:    linux-i2c@vger.kernel.org
 +S:    Maintained
 +F:    drivers/i2c/busses/i2c-mt7621.c
 +F:    Documentation/devicetree/bindings/i2c/i2c-mt7621.txt
 +
  MEDIATEK NAND CONTROLLER DRIVER
  M:    Xiaolei Li <xiaolei.li@mediatek.com>
  L:    linux-mtd@lists.infradead.org
@@@ -10413,7 -10354,7 +10414,7 @@@ M:   Johannes Thumshirn <morbidrsa@gmail.
  S:    Maintained
  F:    drivers/mcb/
  F:    include/linux/mcb.h
 -F:    Documentation/men-chameleon-bus.txt
 +F:    Documentation/driver-api/men-chameleon-bus.rst
  
  MEN F21BMC (Board Management Controller)
  M:    Andreas Werner <andreas.werner@men.de>
@@@ -10827,7 -10768,7 +10828,7 @@@ F:   include/uapi/linux/meye.
  MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
  M:    Jiri Slaby <jirislaby@gmail.com>
  S:    Maintained
 -F:    Documentation/serial/moxa-smartio.rst
 +F:    Documentation/driver-api/serial/moxa-smartio.rst
  F:    drivers/tty/mxser.*
  
  MR800 AVERMEDIA USB FM RADIO DRIVER
@@@ -11071,6 -11012,14 +11072,6 @@@ F:  driver/net/net_failover.
  F:    include/net/net_failover.h
  F:    Documentation/networking/net_failover.rst
  
 -NETEFFECT IWARP RNIC DRIVER (IW_NES)
 -M:    Faisal Latif <faisal.latif@intel.com>
 -L:    linux-rdma@vger.kernel.org
 -W:    http://www.intel.com/Products/Server/Adapters/Server-Cluster/Server-Cluster-overview.htm
 -S:    Supported
 -F:    drivers/infiniband/hw/nes/
 -F:    include/uapi/rdma/nes-abi.h
 -
  NETEM NETWORK EMULATOR
  M:    Stephen Hemminger <stephen@networkplumber.org>
  L:    netem@lists.linux-foundation.org (moderated for non-subscribers)
@@@ -11128,7 -11077,7 +11129,7 @@@ M:   Josef Bacik <josef@toxicpanda.com
  S:    Maintained
  L:    linux-block@vger.kernel.org
  L:    nbd@other.debian.org
 -F:    Documentation/blockdev/nbd.txt
 +F:    Documentation/admin-guide/blockdev/nbd.rst
  F:    drivers/block/nbd.c
  F:    include/trace/events/nbd.h
  F:    include/uapi/linux/nbd.h
@@@ -11587,7 -11536,7 +11588,7 @@@ F:   arch/powerpc/include/asm/pnv-ocxl.
  F:    drivers/misc/ocxl/
  F:    include/misc/ocxl*
  F:    include/uapi/misc/ocxl.h
 -F:    Documentation/accelerators/ocxl.rst
 +F:    Documentation/userspace-api/accelerators/ocxl.rst
  
  OMAP AUDIO SUPPORT
  M:    Peter Ujfalusi <peter.ujfalusi@ti.com>
@@@ -11623,7 -11572,7 +11624,7 @@@ L:   linux-omap@vger.kernel.or
  L:    linux-fbdev@vger.kernel.org
  S:    Orphan
  F:    drivers/video/fbdev/omap2/
 -F:    Documentation/arm/OMAP/DSS
 +F:    Documentation/arm/omap/dss.rst
  
  OMAP FRAMEBUFFER SUPPORT
  L:    linux-fbdev@vger.kernel.org
@@@ -11995,7 -11944,7 +11996,7 @@@ S:   Maintaine
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git
  F:    drivers/opp/
  F:    include/linux/pm_opp.h
 -F:    Documentation/power/opp.txt
 +F:    Documentation/power/opp.rst
  F:    Documentation/devicetree/bindings/opp/
  
  OPL4 DRIVER
@@@ -12110,7 -12059,7 +12111,7 @@@ PARALLEL LCD/KEYPAD PANEL DRIVE
  M:    Willy Tarreau <willy@haproxy.com>
  M:    Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
  S:    Odd Fixes
 -F:    Documentation/auxdisplay/lcd-panel-cgram.txt
 +F:    Documentation/admin-guide/lcd-panel-cgram.rst
  F:    drivers/auxdisplay/panel.c
  
  PARALLEL PORT SUBSYSTEM
@@@ -12122,7 -12071,7 +12123,7 @@@ F:   drivers/parport
  F:    include/linux/parport*.h
  F:    drivers/char/ppdev.c
  F:    include/uapi/linux/ppdev.h
 -F:    Documentation/parport*.txt
 +F:    Documentation/driver-api/parport*.rst
  
  PARAVIRT_OPS INTERFACE
  M:    Juergen Gross <jgross@suse.com>
@@@ -12138,7 -12087,7 +12139,7 @@@ PARIDE DRIVERS FOR PARALLEL PORT IDE DE
  M:    Tim Waugh <tim@cyberelk.net>
  L:    linux-parport@lists.infradead.org (subscribers-only)
  S:    Maintained
 -F:    Documentation/blockdev/paride.txt
 +F:    Documentation/admin-guide/blockdev/paride.rst
  F:    drivers/block/paride/
  
  PARISC ARCHITECTURE
@@@ -12297,7 -12246,7 +12298,7 @@@ M:   Kurt Schwemmer <kurt.schwemmer@micro
  M:    Logan Gunthorpe <logang@deltatee.com>
  L:    linux-pci@vger.kernel.org
  S:    Maintained
 -F:    Documentation/switchtec.txt
 +F:    Documentation/driver-api/switchtec.rst
  F:    Documentation/ABI/testing/sysfs-class-switchtec
  F:    drivers/pci/switch/switchtec*
  F:    include/uapi/linux/switchtec_ioctl.h
@@@ -12382,7 -12331,7 +12383,7 @@@ M:   Sam Bobroff <sbobroff@linux.ibm.com
  M:    Oliver O'Halloran <oohall@gmail.com>
  L:    linuxppc-dev@lists.ozlabs.org
  S:    Supported
 -F:    Documentation/PCI/pci-error-recovery.txt
 +F:    Documentation/PCI/pci-error-recovery.rst
  F:    drivers/pci/pcie/aer.c
  F:    drivers/pci/pcie/dpc.c
  F:    drivers/pci/pcie/err.c
@@@ -12395,7 -12344,7 +12396,7 @@@ PCI ERROR RECOVER
  M:    Linas Vepstas <linasvepstas@gmail.com>
  L:    linux-pci@vger.kernel.org
  S:    Supported
 -F:    Documentation/PCI/pci-error-recovery.txt
 +F:    Documentation/PCI/pci-error-recovery.rst
  
  PCI MSI DRIVER FOR ALTERA MSI IP
  M:    Ley Foon Tan <lftan@altera.com>
@@@ -12644,17 -12593,6 +12645,17 @@@ F: arch/arm/boot/dts/picoxcell
  F:    arch/arm/mach-picoxcell/
  F:    drivers/crypto/picoxcell*
  
 +PIDFD API
 +M:    Christian Brauner <christian@brauner.io>
 +L:    linux-kernel@vger.kernel.org
 +S:    Maintained
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux.git
 +F:    samples/pidfd/
 +F:    tools/testing/selftests/pidfd/
 +K:    (?i)pidfd
 +K:    (?i)clone3
 +K:    \b(clone_args|kernel_clone_args)\b
 +
  PIN CONTROL SUBSYSTEM
  M:    Linus Walleij <linus.walleij@linaro.org>
  L:    linux-gpio@vger.kernel.org
@@@ -12840,7 -12778,6 +12841,7 @@@ F:   drivers/base/power
  F:    include/linux/pm.h
  F:    include/linux/pm_*
  F:    include/linux/powercap.h
 +F:    include/linux/intel_rapl.h
  F:    drivers/powercap/
  F:    kernel/configs/nopm.config
  
@@@ -13070,7 -13007,7 +13071,7 @@@ M:   Thierry Reding <thierry.reding@gmail
  L:    linux-pwm@vger.kernel.org
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/thierry.reding/linux-pwm.git
 -F:    Documentation/pwm.txt
 +F:    Documentation/driver-api/pwm.rst
  F:    Documentation/devicetree/bindings/pwm/
  F:    include/linux/pwm.h
  F:    drivers/pwm/
@@@ -13431,7 -13368,7 +13432,7 @@@ F:   drivers/net/wireless/ralink/rt2x00
  RAMDISK RAM BLOCK DEVICE DRIVER
  M:    Jens Axboe <axboe@kernel.dk>
  S:    Maintained
 -F:    Documentation/blockdev/ramdisk.txt
 +F:    Documentation/admin-guide/blockdev/ramdisk.rst
  F:    drivers/block/brd.c
  
  RANCHU VIRTUAL BOARD FOR MIPS
@@@ -13540,7 -13477,7 +13541,7 @@@ Q:   http://patchwork.ozlabs.org/project/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/abelloni/linux.git
  S:    Maintained
  F:    Documentation/devicetree/bindings/rtc/
 -F:    Documentation/rtc.txt
 +F:    Documentation/admin-guide/rtc.rst
  F:    drivers/rtc/
  F:    include/linux/rtc.h
  F:    include/uapi/linux/rtc.h
@@@ -13590,11 -13527,9 +13591,11 @@@ L: linux-remoteproc@vger.kernel.or
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/remoteproc.git
  S:    Maintained
  F:    Documentation/devicetree/bindings/remoteproc/
 +F:    Documentation/ABI/testing/sysfs-class-remoteproc
  F:    Documentation/remoteproc.txt
  F:    drivers/remoteproc/
  F:    include/linux/remoteproc.h
 +F:    include/linux/remoteproc/
  
  REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM
  M:    Ohad Ben-Cohen <ohad@wizery.com>
@@@ -13604,11 -13539,8 +13605,11 @@@ T: git git://git.kernel.org/pub/scm/lin
  S:    Maintained
  F:    drivers/rpmsg/
  F:    Documentation/rpmsg.txt
 +F:    Documentation/ABI/testing/sysfs-bus-rpmsg
  F:    include/linux/rpmsg.h
  F:    include/linux/rpmsg/
 +F:    include/uapi/linux/rpmsg.h
 +F:    samples/rpmsg/
  
  RENESAS CLOCK DRIVERS
  M:    Geert Uytterhoeven <geert+renesas@glider.be>
@@@ -13689,7 -13621,7 +13690,7 @@@ W:   http://wireless.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
  S:    Maintained
 -F:    Documentation/rfkill.txt
 +F:    Documentation/driver-api/rfkill.rst
  F:    Documentation/ABI/stable/sysfs-class-rfkill
  F:    net/rfkill/
  F:    include/linux/rfkill.h
@@@ -13720,7 -13652,7 +13721,7 @@@ RISC-V ARCHITECTUR
  M:    Palmer Dabbelt <palmer@sifive.com>
  M:    Albert Ou <aou@eecs.berkeley.edu>
  L:    linux-riscv@lists.infradead.org
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git
  S:    Supported
  F:    arch/riscv/
  K:    riscv
@@@ -13758,7 -13690,7 +13759,7 @@@ ROCKETPORT DRIVE
  P:    Comtrol Corp.
  W:    http://www.comtrol.com
  S:    Maintained
 -F:    Documentation/serial/rocket.rst
 +F:    Documentation/driver-api/serial/rocket.rst
  F:    drivers/tty/rocket*
  
  ROCKETPORT EXPRESS/INFINITY DRIVER
@@@ -14152,7 -14084,7 +14153,7 @@@ M:   Sylwester Nawrocki <s.nawrocki@samsu
  L:    linux-kernel@vger.kernel.org
  S:    Supported
  F:    Documentation/devicetree/bindings/phy/samsung-phy.txt
 -F:    Documentation/phy/samsung-usb2.txt
 +F:    Documentation/driver-api/phy/samsung-usb2.rst
  F:    drivers/phy/samsung/phy-exynos4210-usb2.c
  F:    drivers/phy/samsung/phy-exynos4x12-usb2.c
  F:    drivers/phy/samsung/phy-exynos5250-usb2.c
@@@ -14458,7 -14390,7 +14459,7 @@@ SGI SN-IA64 (Altix) SERIAL CONSOLE DRIV
  M:    Pat Gefre <pfg@sgi.com>
  L:    linux-ia64@vger.kernel.org
  S:    Supported
 -F:    Documentation/ia64/serial.txt
 +F:    Documentation/ia64/serial.rst
  F:    drivers/tty/serial/ioc?_serial.c
  F:    include/linux/ioc?.h
  
@@@ -14582,7 -14514,7 +14583,7 @@@ M:   Paul Walmsley <paul.walmsley@sifive.
  L:    linux-riscv@lists.infradead.org
  T:    git git://github.com/sifive/riscv-linux.git
  S:    Supported
 -K:    sifive
 +K:    [^@]sifive
  N:    sifive
  
  SIFIVE FU540 SYSTEM-ON-CHIP
@@@ -14817,13 -14749,6 +14818,13 @@@ M: Chris Boot <bootc@bootc.net
  S:    Maintained
  F:    drivers/leds/leds-net48xx.c
  
 +SOFT-IWARP DRIVER (siw)
 +M:    Bernard Metzler <bmt@zurich.ibm.com>
 +L:    linux-rdma@vger.kernel.org
 +S:    Supported
 +F:    drivers/infiniband/sw/siw/
 +F:    include/uapi/rdma/siw-abi.h
 +
  SOFT-ROCE DRIVER (rxe)
  M:    Moni Shoua <monis@mellanox.com>
  L:    linux-rdma@vger.kernel.org
@@@ -14873,7 -14798,6 +14874,7 @@@ F:   Documentation/devicetree/bindings/ne
  
  SOCIONEXT (SNI) NETSEC NETWORK DRIVER
  M:    Jassi Brar <jaswinder.singh@linaro.org>
 +M:    Ilias Apalodimas <ilias.apalodimas@linaro.org>
  L:    netdev@vger.kernel.org
  S:    Maintained
  F:    drivers/net/ethernet/socionext/netsec.c
@@@ -14965,7 -14889,7 +14966,7 @@@ M:   Mattia Dongili <malattia@linux.it
  L:    platform-driver-x86@vger.kernel.org
  W:    http://www.linux.it/~malattia/wiki/index.php/Sony_drivers
  S:    Maintained
 -F:    Documentation/laptops/sony-laptop.txt
 +F:    Documentation/admin-guide/laptops/sony-laptop.rst
  F:    drivers/char/sonypi.c
  F:    drivers/platform/x86/sony-laptop.c
  F:    include/linux/sony-laptop.h
@@@ -15383,7 -15307,7 +15384,7 @@@ SVGA HANDLIN
  M:    Martin Mares <mj@ucw.cz>
  L:    linux-video@atrey.karlin.mff.cuni.cz
  S:    Maintained
 -F:    Documentation/svga.txt
 +F:    Documentation/admin-guide/svga.rst
  F:    arch/x86/boot/video*
  
  SWIOTLB SUBSYSTEM
@@@ -15420,7 -15344,7 +15421,7 @@@ F:   drivers/dma-buf/dma-fence
  F:    drivers/dma-buf/sw_sync.c
  F:    include/linux/sync_file.h
  F:    include/uapi/linux/sync_file.h
 -F:    Documentation/sync_file.txt
 +F:    Documentation/driver-api/sync_file.rst
  T:    git git://anongit.freedesktop.org/drm/drm-misc
  
  SYNOPSYS ARC ARCHITECTURE
@@@ -15897,7 -15821,7 +15898,7 @@@ M:   Viresh Kumar <viresh.kumar@linaro.or
  M:    Javi Merino <javi.merino@kernel.org>
  L:    linux-pm@vger.kernel.org
  S:    Supported
 -F:    Documentation/thermal/cpu-cooling-api.txt
 +F:    Documentation/thermal/cpu-cooling-api.rst
  F:    drivers/thermal/cpu_cooling.c
  F:    include/linux/cpu_cooling.h
  
@@@ -16041,7 -15965,7 +16042,7 @@@ F:   sound/soc/codecs/isabelle
  TI LP855x BACKLIGHT DRIVER
  M:    Milo Kim <milo.kim@ti.com>
  S:    Maintained
 -F:    Documentation/backlight/lp855x-driver.txt
 +F:    Documentation/driver-api/backlight/lp855x-driver.rst
  F:    drivers/video/backlight/lp855x_bl.c
  F:    include/linux/platform_data/lp855x.h
  
@@@ -16305,7 -16229,7 +16306,7 @@@ M:   Greg Kroah-Hartman <gregkh@linuxfoun
  M:    Jiri Slaby <jslaby@suse.com>
  S:    Supported
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty.git
 -F:    Documentation/serial/
 +F:    Documentation/driver-api/serial/
  F:    drivers/tty/
  F:    drivers/tty/serial/serial_core.c
  F:    include/linux/serial_core.h
@@@ -16916,7 -16840,7 +16917,7 @@@ R:   Cornelia Huck <cohuck@redhat.com
  L:    kvm@vger.kernel.org
  T:    git git://github.com/awilliam/linux-vfio.git
  S:    Maintained
 -F:    Documentation/vfio.txt
 +F:    Documentation/driver-api/vfio.rst
  F:    drivers/vfio/
  F:    include/linux/vfio.h
  F:    include/uapi/linux/vfio.h
@@@ -16925,7 -16849,7 +16926,7 @@@ VFIO MEDIATED DEVICE DRIVER
  M:    Kirti Wankhede <kwankhede@nvidia.com>
  L:    kvm@vger.kernel.org
  S:    Maintained
 -F:    Documentation/vfio-mediated-device.txt
 +F:    Documentation/driver-api/vfio-mediated-device.rst
  F:    drivers/vfio/mdev/
  F:    include/linux/mdev.h
  F:    samples/vfio-mdev/
@@@ -17113,13 -17037,6 +17114,13 @@@ S: Maintaine
  F:    drivers/virtio/virtio_input.c
  F:    include/uapi/linux/virtio_input.h
  
 +VIRTIO IOMMU DRIVER
 +M:    Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 +L:    virtualization@lists.linux-foundation.org
 +S:    Maintained
 +F:    drivers/iommu/virtio-iommu.c
 +F:    include/uapi/linux/virtio_iommu.h
 +
  VIRTUAL BOX GUEST DEVICE DRIVER
  M:    Hans de Goede <hdegoede@redhat.com>
  M:    Arnd Bergmann <arnd@arndb.de>
@@@ -17652,8 -17569,9 +17653,8 @@@ L:   linux-xfs@vger.kernel.or
  W:    http://xfs.org/
  T:    git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
  S:    Supported
 -F:    Documentation/filesystems/xfs.txt
 +F:    Documentation/admin-guide/xfs.rst
  F:    Documentation/ABI/testing/sysfs-fs-xfs
 -F:    Documentation/filesystems/xfs.txt
  F:    Documentation/filesystems/xfs-delayed-logging-design.txt
  F:    Documentation/filesystems/xfs-self-describing-metadata.txt
  F:    fs/xfs/
@@@ -17806,7 -17724,7 +17807,7 @@@ R:   Sergey Senozhatsky <sergey.senozhats
  L:    linux-kernel@vger.kernel.org
  S:    Maintained
  F:    drivers/block/zram/
 -F:    Documentation/blockdev/zram.txt
 +F:    Documentation/admin-guide/blockdev/zram.rst
  
  ZS DECSTATION Z85C30 SERIAL DRIVER
  M:    "Maciej W. Rozycki" <macro@linux-mips.org>
diff --combined fs/dax.c
index e99e5f373c888b0d6c6d548a79a1bbfa24d0aa44,cb53f9bd6fd702c2bc166988ffc4716743fb6122..a237141d8787166eddf9701210ed811ffb77138e
+++ b/fs/dax.c
@@@ -26,7 -26,6 +26,6 @@@
  #include <linux/mmu_notifier.h>
  #include <linux/iomap.h>
  #include <asm/pgalloc.h>
- #include "internal.h"
  
  #define CREATE_TRACE_POINTS
  #include <trace/events/fs_dax.h>
@@@ -123,15 -122,6 +122,15 @@@ static int dax_is_empty_entry(void *ent
        return xa_to_value(entry) & DAX_EMPTY;
  }
  
 +/*
 + * true if the entry that was found is of a smaller order than the entry
 + * we were looking for
 + */
 +static bool dax_is_conflict(void *entry)
 +{
 +      return entry == XA_RETRY_ENTRY;
 +}
 +
  /*
   * DAX page cache entry locking
   */
@@@ -204,13 -194,11 +203,13 @@@ static void dax_wake_entry(struct xa_st
   * Look up entry in page cache, wait for it to become unlocked if it
   * is a DAX entry and return it.  The caller must subsequently call
   * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 - * if it did.
 + * if it did.  The entry returned may have a larger order than @order.
 + * If @order is larger than the order of the entry found in i_pages, this
 + * function returns a dax_is_conflict entry.
   *
   * Must be called with the i_pages lock held.
   */
 -static void *get_unlocked_entry(struct xa_state *xas)
 +static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
  {
        void *entry;
        struct wait_exceptional_entry_queue ewait;
  
        for (;;) {
                entry = xas_find_conflict(xas);
 +              if (dax_entry_order(entry) < order)
 +                      return XA_RETRY_ENTRY;
                if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
                                !dax_is_locked(entry))
                        return entry;
@@@ -267,7 -253,7 +266,7 @@@ static void wait_entry_unlocked(struct 
  static void put_unlocked_entry(struct xa_state *xas, void *entry)
  {
        /* If we were the only waiter woken, wake the next one */
 -      if (entry)
 +      if (entry && dax_is_conflict(entry))
                dax_wake_entry(xas, entry, false);
  }
  
@@@ -474,7 -460,7 +473,7 @@@ void dax_unlock_page(struct page *page
   * overlap with xarray value entries.
   */
  static void *grab_mapping_entry(struct xa_state *xas,
 -              struct address_space *mapping, unsigned long size_flag)
 +              struct address_space *mapping, unsigned int order)
  {
        unsigned long index = xas->xa_index;
        bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
  
  retry:
        xas_lock_irq(xas);
 -      entry = get_unlocked_entry(xas);
 +      entry = get_unlocked_entry(xas, order);
  
        if (entry) {
 +              if (dax_is_conflict(entry))
 +                      goto fallback;
                if (!xa_is_value(entry)) {
                        xas_set_err(xas, EIO);
                        goto out_unlock;
                }
  
 -              if (size_flag & DAX_PMD) {
 -                      if (dax_is_pte_entry(entry)) {
 -                              put_unlocked_entry(xas, entry);
 -                              goto fallback;
 -                      }
 -              } else { /* trying to grab a PTE entry */
 +              if (order == 0) {
                        if (dax_is_pmd_entry(entry) &&
                            (dax_is_zero_entry(entry) ||
                             dax_is_empty_entry(entry))) {
        if (entry) {
                dax_lock_entry(xas, entry);
        } else {
 -              entry = dax_make_entry(pfn_to_pfn_t(0), size_flag | DAX_EMPTY);
 +              unsigned long flags = DAX_EMPTY;
 +
 +              if (order > 0)
 +                      flags |= DAX_PMD;
 +              entry = dax_make_entry(pfn_to_pfn_t(0), flags);
                dax_lock_entry(xas, entry);
                if (xas_error(xas))
                        goto out_unlock;
@@@ -608,7 -593,7 +607,7 @@@ struct page *dax_layout_busy_page(struc
                if (WARN_ON_ONCE(!xa_is_value(entry)))
                        continue;
                if (unlikely(dax_is_locked(entry)))
 -                      entry = get_unlocked_entry(&xas);
 +                      entry = get_unlocked_entry(&xas, 0);
                if (entry)
                        page = dax_busy_page(entry);
                put_unlocked_entry(&xas, entry);
@@@ -635,7 -620,7 +634,7 @@@ static int __dax_invalidate_entry(struc
        void *entry;
  
        xas_lock_irq(&xas);
 -      entry = get_unlocked_entry(&xas);
 +      entry = get_unlocked_entry(&xas, 0);
        if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
                goto out;
        if (!trunc &&
@@@ -862,7 -847,7 +861,7 @@@ static int dax_writeback_one(struct xa_
        if (unlikely(dax_is_locked(entry))) {
                void *old_entry = entry;
  
 -              entry = get_unlocked_entry(xas);
 +              entry = get_unlocked_entry(xas, 0);
  
                /* Entry got punched out / reallocated? */
                if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
@@@ -1523,7 -1508,7 +1522,7 @@@ static vm_fault_t dax_iomap_pmd_fault(s
         * entry is already in the array, for instance), it will return
         * VM_FAULT_FALLBACK.
         */
 -      entry = grab_mapping_entry(&xas, mapping, DAX_PMD);
 +      entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
        if (xa_is_internal(entry)) {
                result = xa_to_internal(entry);
                goto fallback;
@@@ -1672,10 -1657,11 +1671,10 @@@ dax_insert_pfn_mkwrite(struct vm_fault 
        vm_fault_t ret;
  
        xas_lock_irq(&xas);
 -      entry = get_unlocked_entry(&xas);
 +      entry = get_unlocked_entry(&xas, order);
        /* Did we race with someone splitting entry or so? */
 -      if (!entry ||
 -          (order == 0 && !dax_is_pte_entry(entry)) ||
 -          (order == PMD_ORDER && !dax_is_pmd_entry(entry))) {
 +      if (!entry || dax_is_conflict(entry) ||
 +          (order == 0 && !dax_is_pte_entry(entry))) {
                put_unlocked_entry(&xas, entry);
                xas_unlock_irq(&xas);
                trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
diff --combined fs/internal.h
index b9bad2d30cef30d7efcb2d1d984587054de2c694,2b0bebd679045b208bc9804c497ab58178cd4063..ff5173212803b2e6a57335e2e2216e422af746e6
@@@ -14,7 -14,6 +14,7 @@@ struct path
  struct mount;
  struct shrink_control;
  struct fs_context;
 +struct user_namespace;
  
  /*
   * block_dev.c
@@@ -108,7 -107,6 +108,7 @@@ extern struct file *alloc_empty_file_no
  extern int reconfigure_super(struct fs_context *);
  extern bool trylock_super(struct super_block *sb);
  extern struct super_block *user_get_super(dev_t);
 +extern bool mount_capable(struct fs_context *);
  
  /*
   * open.c
@@@ -156,7 -154,6 +156,7 @@@ extern int d_set_mounted(struct dentry 
  extern long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc);
  extern struct dentry *d_alloc_cursor(struct dentry *);
  extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
 +extern char *simple_dname(struct dentry *, char *, int);
  
  /*
   * read_write.c
@@@ -185,15 -182,5 +185,5 @@@ extern const struct dentry_operations n
  extern int do_vfs_ioctl(struct file *file, unsigned int fd, unsigned int cmd,
                    unsigned long arg);
  
- /*
-  * iomap support:
-  */
- typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
-               void *data, struct iomap *iomap);
- loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
-               unsigned flags, const struct iomap_ops *ops, void *data,
-               iomap_actor_t actor);
  /* direct-io.c: */
  int sb_init_dio_done_wq(struct super_block *sb);
diff --combined fs/iomap/buffered-io.c
index 0000000000000000000000000000000000000000,da4d958f9dc86cec067ae5e8e76030ce3afc14fb..e25901ae3ff447712ec6d5635bb7d9ddfd180d3e
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,1073 +1,1073 @@@
 -      ret = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
+ // SPDX-License-Identifier: GPL-2.0
+ /*
+  * Copyright (C) 2010 Red Hat, Inc.
+  * Copyright (c) 2016-2018 Christoph Hellwig.
+  */
+ #include <linux/module.h>
+ #include <linux/compiler.h>
+ #include <linux/fs.h>
+ #include <linux/iomap.h>
+ #include <linux/pagemap.h>
+ #include <linux/uio.h>
+ #include <linux/buffer_head.h>
+ #include <linux/dax.h>
+ #include <linux/writeback.h>
+ #include <linux/swap.h>
+ #include <linux/bio.h>
+ #include <linux/sched/signal.h>
+ #include <linux/migrate.h>
+ #include "../internal.h"
+ static struct iomap_page *
+ iomap_page_create(struct inode *inode, struct page *page)
+ {
+       struct iomap_page *iop = to_iomap_page(page);
+       if (iop || i_blocksize(inode) == PAGE_SIZE)
+               return iop;
+       iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
+       atomic_set(&iop->read_count, 0);
+       atomic_set(&iop->write_count, 0);
+       bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
+       /*
+        * migrate_page_move_mapping() assumes that pages with private data have
+        * their count elevated by 1.
+        */
+       get_page(page);
+       set_page_private(page, (unsigned long)iop);
+       SetPagePrivate(page);
+       return iop;
+ }
+ static void
+ iomap_page_release(struct page *page)
+ {
+       struct iomap_page *iop = to_iomap_page(page);
+       if (!iop)
+               return;
+       WARN_ON_ONCE(atomic_read(&iop->read_count));
+       WARN_ON_ONCE(atomic_read(&iop->write_count));
+       ClearPagePrivate(page);
+       set_page_private(page, 0);
+       put_page(page);
+       kfree(iop);
+ }
+ /*
+  * Calculate the range inside the page that we actually need to read.
+  */
+ static void
+ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
+               loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
+ {
+       loff_t orig_pos = *pos;
+       loff_t isize = i_size_read(inode);
+       unsigned block_bits = inode->i_blkbits;
+       unsigned block_size = (1 << block_bits);
+       unsigned poff = offset_in_page(*pos);
+       unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
+       unsigned first = poff >> block_bits;
+       unsigned last = (poff + plen - 1) >> block_bits;
+       /*
+        * If the block size is smaller than the page size we need to check the
+        * per-block uptodate status and adjust the offset and length if needed
+        * to avoid reading in already uptodate ranges.
+        */
+       if (iop) {
+               unsigned int i;
+               /* move forward for each leading block marked uptodate */
+               for (i = first; i <= last; i++) {
+                       if (!test_bit(i, iop->uptodate))
+                               break;
+                       *pos += block_size;
+                       poff += block_size;
+                       plen -= block_size;
+                       first++;
+               }
+               /* truncate len if we find any trailing uptodate block(s) */
+               for ( ; i <= last; i++) {
+                       if (test_bit(i, iop->uptodate)) {
+                               plen -= (last - i + 1) * block_size;
+                               last = i - 1;
+                               break;
+                       }
+               }
+       }
+       /*
+        * If the extent spans the block that contains the i_size we need to
+        * handle both halves separately so that we properly zero data in the
+        * page cache for blocks that are entirely outside of i_size.
+        */
+       if (orig_pos <= isize && orig_pos + length > isize) {
+               unsigned end = offset_in_page(isize - 1) >> block_bits;
+               if (first <= end && last > end)
+                       plen -= (last - end) * block_size;
+       }
+       *offp = poff;
+       *lenp = plen;
+ }
+ static void
+ iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+ {
+       struct iomap_page *iop = to_iomap_page(page);
+       struct inode *inode = page->mapping->host;
+       unsigned first = off >> inode->i_blkbits;
+       unsigned last = (off + len - 1) >> inode->i_blkbits;
+       unsigned int i;
+       bool uptodate = true;
+       if (iop) {
+               for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
+                       if (i >= first && i <= last)
+                               set_bit(i, iop->uptodate);
+                       else if (!test_bit(i, iop->uptodate))
+                               uptodate = false;
+               }
+       }
+       if (uptodate && !PageError(page))
+               SetPageUptodate(page);
+ }
+ static void
+ iomap_read_finish(struct iomap_page *iop, struct page *page)
+ {
+       if (!iop || atomic_dec_and_test(&iop->read_count))
+               unlock_page(page);
+ }
+ static void
+ iomap_read_page_end_io(struct bio_vec *bvec, int error)
+ {
+       struct page *page = bvec->bv_page;
+       struct iomap_page *iop = to_iomap_page(page);
+       if (unlikely(error)) {
+               ClearPageUptodate(page);
+               SetPageError(page);
+       } else {
+               iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
+       }
+       iomap_read_finish(iop, page);
+ }
+ static void
+ iomap_read_end_io(struct bio *bio)
+ {
+       int error = blk_status_to_errno(bio->bi_status);
+       struct bio_vec *bvec;
+       struct bvec_iter_all iter_all;
+       bio_for_each_segment_all(bvec, bio, iter_all)
+               iomap_read_page_end_io(bvec, error);
+       bio_put(bio);
+ }
+ struct iomap_readpage_ctx {
+       struct page             *cur_page;
+       bool                    cur_page_in_bio;
+       bool                    is_readahead;
+       struct bio              *bio;
+       struct list_head        *pages;
+ };
+ static void
+ iomap_read_inline_data(struct inode *inode, struct page *page,
+               struct iomap *iomap)
+ {
+       size_t size = i_size_read(inode);
+       void *addr;
+       if (PageUptodate(page))
+               return;
+       BUG_ON(page->index);
+       BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
+       addr = kmap_atomic(page);
+       memcpy(addr, iomap->inline_data, size);
+       memset(addr + size, 0, PAGE_SIZE - size);
+       kunmap_atomic(addr);
+       SetPageUptodate(page);
+ }
+ static loff_t
+ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+               struct iomap *iomap)
+ {
+       struct iomap_readpage_ctx *ctx = data;
+       struct page *page = ctx->cur_page;
+       struct iomap_page *iop = iomap_page_create(inode, page);
+       bool same_page = false, is_contig = false;
+       loff_t orig_pos = pos;
+       unsigned poff, plen;
+       sector_t sector;
+       if (iomap->type == IOMAP_INLINE) {
+               WARN_ON_ONCE(pos);
+               iomap_read_inline_data(inode, page, iomap);
+               return PAGE_SIZE;
+       }
+       /* zero post-eof blocks as the page may be mapped */
+       iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
+       if (plen == 0)
+               goto done;
+       if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
+               zero_user(page, poff, plen);
+               iomap_set_range_uptodate(page, poff, plen);
+               goto done;
+       }
+       ctx->cur_page_in_bio = true;
+       /*
+        * Try to merge into a previous segment if we can.
+        */
+       sector = iomap_sector(iomap, pos);
+       if (ctx->bio && bio_end_sector(ctx->bio) == sector)
+               is_contig = true;
+       if (is_contig &&
+           __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
+               if (!same_page && iop)
+                       atomic_inc(&iop->read_count);
+               goto done;
+       }
+       /*
+        * If we start a new segment we need to increase the read count, and we
+        * need to do so before submitting any previous full bio to make sure
+        * that we don't prematurely unlock the page.
+        */
+       if (iop)
+               atomic_inc(&iop->read_count);
+       if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
+               gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
+               int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
+               if (ctx->bio)
+                       submit_bio(ctx->bio);
+               if (ctx->is_readahead) /* same as readahead_gfp_mask */
+                       gfp |= __GFP_NORETRY | __GFP_NOWARN;
+               ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
+               ctx->bio->bi_opf = REQ_OP_READ;
+               if (ctx->is_readahead)
+                       ctx->bio->bi_opf |= REQ_RAHEAD;
+               ctx->bio->bi_iter.bi_sector = sector;
+               bio_set_dev(ctx->bio, iomap->bdev);
+               ctx->bio->bi_end_io = iomap_read_end_io;
+       }
+       bio_add_page(ctx->bio, page, plen, poff);
+ done:
+       /*
+        * Move the caller beyond our range so that it keeps making progress.
+        * For that we have to include any leading non-uptodate ranges, but
+        * we can skip trailing ones as they will be handled in the next
+        * iteration.
+        */
+       return pos - orig_pos + plen;
+ }
+ int
+ iomap_readpage(struct page *page, const struct iomap_ops *ops)
+ {
+       struct iomap_readpage_ctx ctx = { .cur_page = page };
+       struct inode *inode = page->mapping->host;
+       unsigned poff;
+       loff_t ret;
+       for (poff = 0; poff < PAGE_SIZE; poff += ret) {
+               ret = iomap_apply(inode, page_offset(page) + poff,
+                               PAGE_SIZE - poff, 0, ops, &ctx,
+                               iomap_readpage_actor);
+               if (ret <= 0) {
+                       WARN_ON_ONCE(ret == 0);
+                       SetPageError(page);
+                       break;
+               }
+       }
+       if (ctx.bio) {
+               submit_bio(ctx.bio);
+               WARN_ON_ONCE(!ctx.cur_page_in_bio);
+       } else {
+               WARN_ON_ONCE(ctx.cur_page_in_bio);
+               unlock_page(page);
+       }
+       /*
+        * Just like mpage_readpages and block_read_full_page we always
+        * return 0 and just mark the page as PageError on errors.  This
+        * should be cleaned up all through the stack eventually.
+        */
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(iomap_readpage);
+ static struct page *
+ iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
+               loff_t length, loff_t *done)
+ {
+       while (!list_empty(pages)) {
+               struct page *page = lru_to_page(pages);
+               if (page_offset(page) >= (u64)pos + length)
+                       break;
+               list_del(&page->lru);
+               if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
+                               GFP_NOFS))
+                       return page;
+               /*
+                * If we already have a page in the page cache at index we are
+                * done.  Upper layers don't care if it is uptodate after the
+                * readpages call itself as every page gets checked again once
+                * actually needed.
+                */
+               *done += PAGE_SIZE;
+               put_page(page);
+       }
+       return NULL;
+ }
+ static loff_t
+ iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
+               void *data, struct iomap *iomap)
+ {
+       struct iomap_readpage_ctx *ctx = data;
+       loff_t done, ret;
+       for (done = 0; done < length; done += ret) {
+               if (ctx->cur_page && offset_in_page(pos + done) == 0) {
+                       if (!ctx->cur_page_in_bio)
+                               unlock_page(ctx->cur_page);
+                       put_page(ctx->cur_page);
+                       ctx->cur_page = NULL;
+               }
+               if (!ctx->cur_page) {
+                       ctx->cur_page = iomap_next_page(inode, ctx->pages,
+                                       pos, length, &done);
+                       if (!ctx->cur_page)
+                               break;
+                       ctx->cur_page_in_bio = false;
+               }
+               ret = iomap_readpage_actor(inode, pos + done, length - done,
+                               ctx, iomap);
+       }
+       return done;
+ }
+ int
+ iomap_readpages(struct address_space *mapping, struct list_head *pages,
+               unsigned nr_pages, const struct iomap_ops *ops)
+ {
+       struct iomap_readpage_ctx ctx = {
+               .pages          = pages,
+               .is_readahead   = true,
+       };
+       loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
+       loff_t last = page_offset(list_entry(pages->next, struct page, lru));
+       loff_t length = last - pos + PAGE_SIZE, ret = 0;
+       while (length > 0) {
+               ret = iomap_apply(mapping->host, pos, length, 0, ops,
+                               &ctx, iomap_readpages_actor);
+               if (ret <= 0) {
+                       WARN_ON_ONCE(ret == 0);
+                       goto done;
+               }
+               pos += ret;
+               length -= ret;
+       }
+       ret = 0;
+ done:
+       if (ctx.bio)
+               submit_bio(ctx.bio);
+       if (ctx.cur_page) {
+               if (!ctx.cur_page_in_bio)
+                       unlock_page(ctx.cur_page);
+               put_page(ctx.cur_page);
+       }
+       /*
+        * Check that we didn't lose a page due to the arcance calling
+        * conventions..
+        */
+       WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(iomap_readpages);
+ /*
+  * iomap_is_partially_uptodate checks whether blocks within a page are
+  * uptodate or not.
+  *
+  * Returns true if all blocks which correspond to a file portion
+  * we want to read within the page are uptodate.
+  */
+ int
+ iomap_is_partially_uptodate(struct page *page, unsigned long from,
+               unsigned long count)
+ {
+       struct iomap_page *iop = to_iomap_page(page);
+       struct inode *inode = page->mapping->host;
+       unsigned len, first, last;
+       unsigned i;
+       /* Limit range to one page */
+       len = min_t(unsigned, PAGE_SIZE - from, count);
+       /* First and last blocks in range within page */
+       first = from >> inode->i_blkbits;
+       last = (from + len - 1) >> inode->i_blkbits;
+       if (iop) {
+               for (i = first; i <= last; i++)
+                       if (!test_bit(i, iop->uptodate))
+                               return 0;
+               return 1;
+       }
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
+ int
+ iomap_releasepage(struct page *page, gfp_t gfp_mask)
+ {
+       /*
+        * mm accommodates an old ext3 case where clean pages might not have had
+        * the dirty bit cleared. Thus, it can send actual dirty pages to
+        * ->releasepage() via shrink_active_list(), skip those here.
+        */
+       if (PageDirty(page) || PageWriteback(page))
+               return 0;
+       iomap_page_release(page);
+       return 1;
+ }
+ EXPORT_SYMBOL_GPL(iomap_releasepage);
+ void
+ iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
+ {
+       /*
+        * If we are invalidating the entire page, clear the dirty state from it
+        * and release it to avoid unnecessary buildup of the LRU.
+        */
+       if (offset == 0 && len == PAGE_SIZE) {
+               WARN_ON_ONCE(PageWriteback(page));
+               cancel_dirty_page(page);
+               iomap_page_release(page);
+       }
+ }
+ EXPORT_SYMBOL_GPL(iomap_invalidatepage);
+ #ifdef CONFIG_MIGRATION
+ int
+ iomap_migrate_page(struct address_space *mapping, struct page *newpage,
+               struct page *page, enum migrate_mode mode)
+ {
+       int ret;
++      ret = migrate_page_move_mapping(mapping, newpage, page, 0);
+       if (ret != MIGRATEPAGE_SUCCESS)
+               return ret;
+       if (page_has_private(page)) {
+               ClearPagePrivate(page);
+               get_page(newpage);
+               set_page_private(newpage, page_private(page));
+               set_page_private(page, 0);
+               put_page(page);
+               SetPagePrivate(newpage);
+       }
+       if (mode != MIGRATE_SYNC_NO_COPY)
+               migrate_page_copy(newpage, page);
+       else
+               migrate_page_states(newpage, page);
+       return MIGRATEPAGE_SUCCESS;
+ }
+ EXPORT_SYMBOL_GPL(iomap_migrate_page);
+ #endif /* CONFIG_MIGRATION */
+ static void
+ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
+ {
+       loff_t i_size = i_size_read(inode);
+       /*
+        * Only truncate newly allocated pages beyoned EOF, even if the
+        * write started inside the existing inode size.
+        */
+       if (pos + len > i_size)
+               truncate_pagecache_range(inode, max(pos, i_size), pos + len);
+ }
+ static int
+ iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
+               unsigned poff, unsigned plen, unsigned from, unsigned to,
+               struct iomap *iomap)
+ {
+       struct bio_vec bvec;
+       struct bio bio;
+       if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
+               zero_user_segments(page, poff, from, to, poff + plen);
+               iomap_set_range_uptodate(page, poff, plen);
+               return 0;
+       }
+       bio_init(&bio, &bvec, 1);
+       bio.bi_opf = REQ_OP_READ;
+       bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
+       bio_set_dev(&bio, iomap->bdev);
+       __bio_add_page(&bio, page, plen, poff);
+       return submit_bio_wait(&bio);
+ }
+ static int
+ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
+               struct page *page, struct iomap *iomap)
+ {
+       struct iomap_page *iop = iomap_page_create(inode, page);
+       loff_t block_size = i_blocksize(inode);
+       loff_t block_start = pos & ~(block_size - 1);
+       loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
+       unsigned from = offset_in_page(pos), to = from + len, poff, plen;
+       int status = 0;
+       if (PageUptodate(page))
+               return 0;
+       do {
+               iomap_adjust_read_range(inode, iop, &block_start,
+                               block_end - block_start, &poff, &plen);
+               if (plen == 0)
+                       break;
+               if ((from > poff && from < poff + plen) ||
+                   (to > poff && to < poff + plen)) {
+                       status = iomap_read_page_sync(inode, block_start, page,
+                                       poff, plen, from, to, iomap);
+                       if (status)
+                               break;
+               }
+       } while ((block_start += plen) < block_end);
+       return status;
+ }
+ static int
+ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
+               struct page **pagep, struct iomap *iomap)
+ {
+       const struct iomap_page_ops *page_ops = iomap->page_ops;
+       pgoff_t index = pos >> PAGE_SHIFT;
+       struct page *page;
+       int status = 0;
+       BUG_ON(pos + len > iomap->offset + iomap->length);
+       if (fatal_signal_pending(current))
+               return -EINTR;
+       if (page_ops && page_ops->page_prepare) {
+               status = page_ops->page_prepare(inode, pos, len, iomap);
+               if (status)
+                       return status;
+       }
+       page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
+       if (!page) {
+               status = -ENOMEM;
+               goto out_no_page;
+       }
+       if (iomap->type == IOMAP_INLINE)
+               iomap_read_inline_data(inode, page, iomap);
+       else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
+               status = __block_write_begin_int(page, pos, len, NULL, iomap);
+       else
+               status = __iomap_write_begin(inode, pos, len, page, iomap);
+       if (unlikely(status))
+               goto out_unlock;
+       *pagep = page;
+       return 0;
+ out_unlock:
+       unlock_page(page);
+       put_page(page);
+       iomap_write_failed(inode, pos, len);
+ out_no_page:
+       if (page_ops && page_ops->page_done)
+               page_ops->page_done(inode, pos, 0, NULL, iomap);
+       return status;
+ }
+ int
+ iomap_set_page_dirty(struct page *page)
+ {
+       struct address_space *mapping = page_mapping(page);
+       int newly_dirty;
+       if (unlikely(!mapping))
+               return !TestSetPageDirty(page);
+       /*
+        * Lock out page->mem_cgroup migration to keep PageDirty
+        * synchronized with per-memcg dirty page counters.
+        */
+       lock_page_memcg(page);
+       newly_dirty = !TestSetPageDirty(page);
+       if (newly_dirty)
+               __set_page_dirty(page, mapping, 0);
+       unlock_page_memcg(page);
+       if (newly_dirty)
+               __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+       return newly_dirty;
+ }
+ EXPORT_SYMBOL_GPL(iomap_set_page_dirty);
+ static int
+ __iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
+               unsigned copied, struct page *page, struct iomap *iomap)
+ {
+       flush_dcache_page(page);
+       /*
+        * The blocks that were entirely written will now be uptodate, so we
+        * don't have to worry about a readpage reading them and overwriting a
+        * partial write.  However if we have encountered a short write and only
+        * partially written into a block, it will not be marked uptodate, so a
+        * readpage might come in and destroy our partial write.
+        *
+        * Do the simplest thing, and just treat any short write to a non
+        * uptodate page as a zero-length write, and force the caller to redo
+        * the whole thing.
+        */
+       if (unlikely(copied < len && !PageUptodate(page)))
+               return 0;
+       iomap_set_range_uptodate(page, offset_in_page(pos), len);
+       iomap_set_page_dirty(page);
+       return copied;
+ }
+ static int
+ iomap_write_end_inline(struct inode *inode, struct page *page,
+               struct iomap *iomap, loff_t pos, unsigned copied)
+ {
+       void *addr;
+       WARN_ON_ONCE(!PageUptodate(page));
+       BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));
+       addr = kmap_atomic(page);
+       memcpy(iomap->inline_data + pos, addr + pos, copied);
+       kunmap_atomic(addr);
+       mark_inode_dirty(inode);
+       return copied;
+ }
+ static int
+ iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
+               unsigned copied, struct page *page, struct iomap *iomap)
+ {
+       const struct iomap_page_ops *page_ops = iomap->page_ops;
+       loff_t old_size = inode->i_size;
+       int ret;
+       if (iomap->type == IOMAP_INLINE) {
+               ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
+       } else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
+               ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
+                               page, NULL);
+       } else {
+               ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
+       }
+       /*
+        * Update the in-memory inode size after copying the data into the page
+        * cache.  It's up to the file system to write the updated size to disk,
+        * preferably after I/O completion so that no stale data is exposed.
+        */
+       if (pos + ret > old_size) {
+               i_size_write(inode, pos + ret);
+               iomap->flags |= IOMAP_F_SIZE_CHANGED;
+       }
+       unlock_page(page);
+       if (old_size < pos)
+               pagecache_isize_extended(inode, old_size, pos);
+       if (page_ops && page_ops->page_done)
+               page_ops->page_done(inode, pos, ret, page, iomap);
+       put_page(page);
+       if (ret < len)
+               iomap_write_failed(inode, pos, len);
+       return ret;
+ }
+ /*
+  * ->actor for buffered writes: copy user data from the iov_iter (passed
+  * in via @data) into the page cache across the single mapping described
+  * by @iomap.  Returns the number of bytes written into this mapping, or
+  * a negative errno if nothing could be written.
+  */
+ static loff_t
+ iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+               struct iomap *iomap)
+ {
+       struct iov_iter *i = data;
+       long status = 0;
+       ssize_t written = 0;
+       unsigned int flags = AOP_FLAG_NOFS;
+       do {
+               struct page *page;
+               unsigned long offset;   /* Offset into pagecache page */
+               unsigned long bytes;    /* Bytes to write to page */
+               size_t copied;          /* Bytes copied from user */
+               offset = offset_in_page(pos);
+               bytes = min_t(unsigned long, PAGE_SIZE - offset,
+                                               iov_iter_count(i));
+ again:
+               /* never write past the end of the current mapping */
+               if (bytes > length)
+                       bytes = length;
+               /*
+                * Bring in the user page that we will copy from _first_.
+                * Otherwise there's a nasty deadlock on copying from the
+                * same page as we're writing to, without it being marked
+                * up-to-date.
+                *
+                * Not only is this an optimisation, but it is also required
+                * to check that the address is actually valid, when atomic
+                * usercopies are used, below.
+                */
+               if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
+                       status = -EFAULT;
+                       break;
+               }
+               status = iomap_write_begin(inode, pos, bytes, flags, &page,
+                               iomap);
+               if (unlikely(status))
+                       break;
+               if (mapping_writably_mapped(inode->i_mapping))
+                       flush_dcache_page(page);
+               copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
+               /* make the just-written data visible before write_end */
+               flush_dcache_page(page);
+               /* iomap_write_end returns the bytes actually committed */
+               status = iomap_write_end(inode, pos, bytes, copied, page,
+                               iomap);
+               if (unlikely(status < 0))
+                       break;
+               copied = status;
+               cond_resched();
+               iov_iter_advance(i, copied);
+               if (unlikely(copied == 0)) {
+                       /*
+                        * If we were unable to copy any data at all, we must
+                        * fall back to a single segment length write.
+                        *
+                        * If we didn't fallback here, we could livelock
+                        * because not all segments in the iov can be copied at
+                        * once without a pagefault.
+                        */
+                       bytes = min_t(unsigned long, PAGE_SIZE - offset,
+                                               iov_iter_single_seg_count(i));
+                       goto again;
+               }
+               pos += copied;
+               written += copied;
+               length -= copied;
+               balance_dirty_pages_ratelimited(inode->i_mapping);
+       } while (iov_iter_count(i) && length);
+       return written ? written : status;
+ }
+ /*
+  * Buffered write entry point: run iomap_write_actor() over the byte range
+  * described by @iter, one extent mapping at a time.  Returns the number of
+  * bytes written, or the first error when nothing was written at all.
+  */
+ ssize_t
+ iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
+               const struct iomap_ops *ops)
+ {
+       struct inode *inode = iocb->ki_filp->f_mapping->host;
+       loff_t offset = iocb->ki_pos;
+       loff_t status = 0, done = 0;
+       for (; iov_iter_count(iter); offset += status, done += status) {
+               status = iomap_apply(inode, offset, iov_iter_count(iter),
+                               IOMAP_WRITE, ops, iter, iomap_write_actor);
+               if (status <= 0)
+                       break;
+       }
+       return done ? done : status;
+ }
+ EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
+ /*
+  * Read one page of @inode at byte @offset through the page cache and
+  * return it with a reference held.  Returns ERR_PTR(-EIO) if the read
+  * completed but left the page not uptodate.
+  */
+ static struct page *
+ __iomap_read_page(struct inode *inode, loff_t offset)
+ {
+       struct page *page = read_mapping_page(inode->i_mapping,
+                       offset >> PAGE_SHIFT, NULL);
+       if (IS_ERR(page))
+               return page;
+       if (PageUptodate(page))
+               return page;
+       /* read "succeeded" but the page is unusable: drop our reference */
+       put_page(page);
+       return ERR_PTR(-EIO);
+ }
+ /*
+  * ->actor for iomap_file_dirty(): walk one mapping and push every page
+  * cache page in the range back through write_begin/write_end with its
+  * existing contents, so the page is dirtied against the current mapping.
+  * Returns bytes processed or a negative errno.
+  */
+ static loff_t
+ iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+               struct iomap *iomap)
+ {
+       long status = 0;
+       ssize_t written = 0;
+       do {
+               struct page *page, *rpage;
+               unsigned long offset;   /* Offset into pagecache page */
+               unsigned long bytes;    /* Bytes to write to page */
+               offset = offset_in_page(pos);
+               bytes = min_t(loff_t, PAGE_SIZE - offset, length);
+               /* make sure the page is cached and uptodate first */
+               rpage = __iomap_read_page(inode, pos);
+               if (IS_ERR(rpage))
+                       return PTR_ERR(rpage);
+               status = iomap_write_begin(inode, pos, bytes,
+                                          AOP_FLAG_NOFS, &page, iomap);
+               /* the extra reference from __iomap_read_page is done now */
+               put_page(rpage);
+               if (unlikely(status))
+                       return status;
+               WARN_ON_ONCE(!PageUptodate(page));
+               /* copied == bytes: the data is already in the page */
+               status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
+               if (unlikely(status <= 0)) {
+                       /* a short write here should be impossible */
+                       if (WARN_ON_ONCE(status == 0))
+                               return -EIO;
+                       return status;
+               }
+               cond_resched();
+               pos += status;
+               written += status;
+               length -= status;
+               balance_dirty_pages_ratelimited(inode->i_mapping);
+       } while (length);
+       return written;
+ }
+ /*
+  * Re-dirty the page cache over [pos, pos + len) so the existing data is
+  * written back through the current block mappings.  Returns 0 on success
+  * or a negative errno.
+  */
+ int
+ iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
+               const struct iomap_ops *ops)
+ {
+       loff_t done;
+       for (; len; pos += done, len -= done) {
+               done = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
+                               iomap_dirty_actor);
+               if (done <= 0)
+                       return done;
+       }
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(iomap_file_dirty);
+ /*
+  * Zero @bytes bytes at @offset within the page cache page covering @pos,
+  * going through the normal write_begin/write_end path so the page gets
+  * dirtied.  Returns the byte count from iomap_write_end() or a negative
+  * errno.
+  */
+ static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
+               unsigned bytes, struct iomap *iomap)
+ {
+       struct page *page;
+       int status;
+       status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
+                                  iomap);
+       if (status)
+               return status;
+       zero_user(page, offset, bytes);
+       mark_page_accessed(page);
+       return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
+ }
+ /*
+  * DAX counterpart of iomap_zero(): zero the range directly on the
+  * backing device, bypassing the page cache.
+  */
+ static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
+               struct iomap *iomap)
+ {
+       return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
+                       iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
+ }
+ /*
+  * ->actor for iomap_zero_range(): zero the part of a single mapping that
+  * intersects [pos, pos + count), page by page.  @data points to the
+  * caller's optional did_zero flag.  Returns bytes handled or a negative
+  * errno.
+  */
+ static loff_t
+ iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
+               void *data, struct iomap *iomap)
+ {
+       bool *did_zero = data;
+       loff_t written = 0;
+       int status;
+       /* already zeroed?  we're done. */
+       if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
+               return count;
+       do {
+               unsigned offset, bytes;
+               offset = offset_in_page(pos);
+               bytes = min_t(loff_t, PAGE_SIZE - offset, count);
+               /* DAX zeroes on the device; otherwise go via the page cache */
+               if (IS_DAX(inode))
+                       status = iomap_dax_zero(pos, offset, bytes, iomap);
+               else
+                       status = iomap_zero(inode, pos, offset, bytes, iomap);
+               if (status < 0)
+                       return status;
+               pos += bytes;
+               count -= bytes;
+               written += bytes;
+               if (did_zero)
+                       *did_zero = true;
+       } while (count > 0);
+       return written;
+ }
+ /*
+  * Zero the byte range [pos, pos + len).  Holes and unwritten extents are
+  * skipped (they already read as zeroes); *@did_zero, if non-NULL, is set
+  * when any bytes were actually zeroed.  Returns 0 or a negative errno.
+  */
+ int
+ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
+               const struct iomap_ops *ops)
+ {
+       loff_t done;
+       for (; len > 0; pos += done, len -= done) {
+               done = iomap_apply(inode, pos, len, IOMAP_ZERO, ops,
+                               did_zero, iomap_zero_range_actor);
+               if (done <= 0)
+                       return done;
+       }
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(iomap_zero_range);
+ /*
+  * Zero from @pos to the end of its filesystem block, as needed when
+  * truncating to a non block-aligned size.  A block-aligned @pos is a
+  * no-op.  *@did_zero reports whether anything was zeroed.
+  */
+ int
+ iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
+               const struct iomap_ops *ops)
+ {
+       unsigned int bsize = i_blocksize(inode);
+       unsigned int off_in_block = pos & (bsize - 1);
+       if (off_in_block == 0)
+               return 0;       /* block boundary, nothing to zero */
+       return iomap_zero_range(inode, pos, bsize - off_in_block,
+                       did_zero, ops);
+ }
+ EXPORT_SYMBOL_GPL(iomap_truncate_page);
+ /*
+  * ->actor for iomap_page_mkwrite(): prepare one mapping's worth of the
+  * faulted page (@data) for writing.  Mappings flagged IOMAP_F_BUFFER_HEAD
+  * go through the buffer_head write path; otherwise the page is expected
+  * to be uptodate already and only needs its iomap per-page state attached
+  * and the dirty bit set.  Returns @length on success or a negative errno.
+  */
+ static loff_t
+ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
+               void *data, struct iomap *iomap)
+ {
+       struct page *page = data;
+       int ret;
+       if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
+               ret = __block_write_begin_int(page, pos, length, NULL, iomap);
+               if (ret)
+                       return ret;
+               block_commit_write(page, 0, length);
+       } else {
+               WARN_ON_ONCE(!PageUptodate(page));
+               iomap_page_create(inode, page);
+               set_page_dirty(page);
+       }
+       return length;
+ }
+ /*
+  * ->page_mkwrite handler for iomap filesystems: make the faulted page
+  * writable by running the write actor over every mapping backing the part
+  * of the page inside EOF.  On success the page is returned locked
+  * (VM_FAULT_LOCKED) and stable; otherwise a VM_FAULT_* code is returned.
+  */
+ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
+ {
+       struct page *page = vmf->page;
+       struct inode *inode = file_inode(vmf->vma->vm_file);
+       unsigned long length;
+       loff_t offset, size;
+       ssize_t ret;
+       lock_page(page);
+       size = i_size_read(inode);
+       /*
+        * Bail out if the page was truncated away after the fault was
+        * taken.  page_offset() is page aligned, so a page starting exactly
+        * at @size lies wholly beyond EOF as well -- hence ">=" rather than
+        * ">", which would fall through with length == 0 and wrongly return
+        * VM_FAULT_LOCKED for a page entirely outside the file.
+        */
+       if ((page->mapping != inode->i_mapping) ||
+           (page_offset(page) >= size)) {
+               /* We overload EFAULT to mean page got truncated */
+               ret = -EFAULT;
+               goto out_unlock;
+       }
+       /* page is wholly or partially inside EOF */
+       if (((page->index + 1) << PAGE_SHIFT) > size)
+               length = offset_in_page(size);
+       else
+               length = PAGE_SIZE;
+       offset = page_offset(page);
+       /* a page may span several mappings; handle each in turn */
+       while (length > 0) {
+               ret = iomap_apply(inode, offset, length,
+                               IOMAP_WRITE | IOMAP_FAULT, ops, page,
+                               iomap_page_mkwrite_actor);
+               if (unlikely(ret <= 0))
+                       goto out_unlock;
+               offset += ret;
+               length -= ret;
+       }
+       wait_for_stable_page(page);
+       return VM_FAULT_LOCKED;
+ out_unlock:
+       unlock_page(page);
+       return block_page_mkwrite_return(ret);
+ }
+ EXPORT_SYMBOL_GPL(iomap_page_mkwrite);