/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/aio.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
                        (atomic_read(&inode->i_writecount) == 1) &&
                        !EXT4_I(inode)->i_reserved_data_blocks)
        {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

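/*
 * Wait until all pending conversions of unwritten extents on this inode
 * have completed, i.e. until the i_unwritten counter drops to zero.
 */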
void ext4_unwritten_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
                   unsigned long nr_segs, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;
        size_t count = iov_length(iov, nr_segs);
        loff_t final_size = pos + count;

        if (pos >= inode->i_size)
                return 0;

        if ((pos & blockmask) || (final_size & blockmask))
                return 1;

        return 0;
}

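/*
 * O_DIRECT write path.  Unaligned AIO is serialized on the per-inode AIO
 * mutex (see the comment above ext4_unaligned_aio()).  When the write is a
 * pure overwrite of already-initialized blocks, iocb->private is used to
 * tell the direct-IO path that no new blocks need to be allocated, which
 * lets it relax its locking for the duration of the I/O.
 */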
static ssize_t
ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
                    unsigned long nr_segs, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct blk_plug plug;
        int unaligned_aio = 0;
        ssize_t ret;
        int overwrite = 0;
        size_t length = iov_length(iov, nr_segs);

        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb))
                unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);

        /* Unaligned direct AIO must be serialized; see comment above */
        if (unaligned_aio) {
                mutex_lock(ext4_aio_mutex(inode));
                ext4_unwritten_wait(inode);
        }

        BUG_ON(iocb->ki_pos != pos);

        mutex_lock(&inode->i_mutex);
        blk_start_plug(&plug);

        iocb->private = &overwrite;

        /* check whether we do a DIO overwrite or not */
        if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
            !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
                struct ext4_map_blocks map;
                unsigned int blkbits = inode->i_blkbits;
                int err, len;

                map.m_lblk = pos >> blkbits;
                map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
                        - map.m_lblk;
                len = map.m_len;

                err = ext4_map_blocks(NULL, inode, &map, 0);
                /*
                 * 'err == len' means that all of the blocks have been
                 * preallocated, whether or not they are initialized.  To
                 * exclude uninitialized extents we also need to check
                 * m_flags.  There are two cases that indicate an initialized
                 * extent:
                 * 1) if we hit the extent cache, the EXT4_MAP_MAPPED flag is
                 *    returned;
                 * 2) if we do a real lookup, no flags are returned.
                 * So we need to check both conditions.
                 */
                if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
                        overwrite = 1;
        }

        ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
        mutex_unlock(&inode->i_mutex);

        if (ret > 0 || ret == -EIOCBQUEUED) {
                ssize_t err;

                err = generic_write_sync(file, pos, ret);
                if (err < 0 && ret > 0)
                        ret = err;
        }
        blk_finish_plug(&plug);

        if (unaligned_aio)
                mutex_unlock(ext4_aio_mutex(inode));

        return ret;
}

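/*
 * .aio_write entry point.  For block-mapped (non-extent) files the write is
 * clamped to s_bitmap_maxbytes, which is smaller than s_maxbytes; O_DIRECT
 * writes are then handed off to ext4_file_dio_write(), everything else goes
 * through generic_file_aio_write().
 */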
static ssize_t
ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
                unsigned long nr_segs, loff_t pos)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
                size_t length = iov_length(iov, nr_segs);

                if ((pos > sbi->s_bitmap_maxbytes ||
                    (pos == sbi->s_bitmap_maxbytes && length > 0)))
                        return -EFBIG;

                if (pos + length > sbi->s_bitmap_maxbytes) {
                        nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
                                              sbi->s_bitmap_maxbytes - pos);
                }
        }

        if (unlikely(iocb->ki_filp->f_flags & O_DIRECT))
                ret = ext4_file_dio_write(iocb, iov, nr_segs, pos);
        else
                ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

        return ret;
}

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = ext4_page_mkwrite,
        .remap_pages    = generic_file_remap_pages,
};

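/*
 * mmap() uses the generic fault handler but installs ext4's own
 * ->page_mkwrite so that block allocation and journaling can be performed
 * when a writable mapping dirties a page.
 */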
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct address_space *mapping = file->f_mapping;

        if (!mapping->a_ops->readpage)
                return -ENOEXEC;
        file_accessed(file);
        vma->vm_ops = &ext4_file_vm_ops;
        return 0;
}

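/*
 * Called on every open of a regular file.  On the first open after a
 * read-write mount it records the mount point in the superblock's
 * s_last_mounted field, and for writers it makes sure the inode has a
 * jbd2_inode attached before handing off to dquot_file_open().
 */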
static int ext4_file_open(struct inode *inode, struct file *filp)
{
        struct super_block *sb = inode->i_sb;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct vfsmount *mnt = filp->f_path.mnt;
        struct path path;
        char buf[64], *cp;

        if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
                     !(sb->s_flags & MS_RDONLY))) {
                sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
                /*
                 * Sample where the filesystem has been mounted and
                 * store it in the superblock for sysadmin convenience
                 * when trying to sort through large numbers of block
                 * devices or filesystem images.
                 */
                memset(buf, 0, sizeof(buf));
                path.mnt = mnt;
                path.dentry = mnt->mnt_root;
                cp = d_path(&path, buf, sizeof(buf));
                if (!IS_ERR(cp)) {
                        handle_t *handle;
                        int err;

                        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
                        if (IS_ERR(handle))
                                return PTR_ERR(handle);
                        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
                        if (err) {
                                ext4_journal_stop(handle);
                                return err;
                        }
                        strlcpy(sbi->s_es->s_last_mounted, cp,
                                sizeof(sbi->s_es->s_last_mounted));
                        ext4_handle_dirty_super(handle, sb);
                        ext4_journal_stop(handle);
                }
        }
        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
         */
        if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
                struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);

                spin_lock(&inode->i_lock);
                if (!ei->jinode) {
                        if (!jinode) {
                                spin_unlock(&inode->i_lock);
                                return -ENOMEM;
                        }
                        ei->jinode = jinode;
                        jbd2_journal_init_jbd_inode(ei->jinode, inode);
                        jinode = NULL;
                }
                spin_unlock(&inode->i_lock);
                if (unlikely(jinode != NULL))
                        jbd2_free_inode(jinode);
        }
        return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space(), so that SEEK_DATA/SEEK_HOLE can be
 * handled for block-mapped and extent-mapped files in the same function.
 * Once the extent status tree is fully implemented, it will track the status
 * of every extent in a file and we can query it directly to retrieve the
 * offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look up
 * the page cache to check whether there is any data in the range
 * [startoff, endoff]: if the range contains an unwritten extent, we treat
 * that extent as data or as a hole depending on whether the page cache has
 * data for it.
 */
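/*
 * Returns 1 if a matching offset was found (and stores it in *offset),
 * 0 otherwise.
 */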
static int ext4_find_unwritten_pgoff(struct inode *inode,
                                     int whence,
                                     struct ext4_map_blocks *map,
                                     loff_t *offset)
{
        struct pagevec pvec;
        unsigned int blkbits;
        pgoff_t index;
        pgoff_t end;
        loff_t endoff;
        loff_t startoff;
        loff_t lastoff;
        int found = 0;

        blkbits = inode->i_sb->s_blocksize_bits;
        startoff = *offset;
        lastoff = startoff;
        endoff = (map->m_lblk + map->m_len) << blkbits;

        index = startoff >> PAGE_CACHE_SHIFT;
        end = endoff >> PAGE_CACHE_SHIFT;

        pagevec_init(&pvec, 0);
        do {
                int i, num;
                unsigned long nr_pages;

                num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
                nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
                                          (pgoff_t)num);
                if (nr_pages == 0) {
                        if (whence == SEEK_DATA)
                                break;

                        BUG_ON(whence != SEEK_HOLE);
                        /*
                         * If this is the first pass through the loop, or the
                         * offset is still before the end offset, there is a
                         * hole at this offset.
                         */
                        if (lastoff == startoff || lastoff < endoff)
                                found = 1;
                        break;
                }

                /*
                 * If this is the first pass through the loop and the offset
                 * is smaller than the offset of the first page found, there
                 * is a hole at this offset.
                 */
                if (lastoff == startoff && whence == SEEK_HOLE &&
                    lastoff < page_offset(pvec.pages[0])) {
                        found = 1;
                        break;
                }

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        struct buffer_head *bh, *head;

                        /*
                         * If the current offset is not beyond the end of the
                         * given range, it is a hole.
                         */
                        if (lastoff < endoff && whence == SEEK_HOLE &&
                            page->index > end) {
                                found = 1;
                                *offset = lastoff;
                                goto out;
                        }

                        lock_page(page);

                        if (unlikely(page->mapping != inode->i_mapping)) {
                                unlock_page(page);
                                continue;
                        }

                        if (!page_has_buffers(page)) {
                                unlock_page(page);
                                continue;
                        }

                        if (page_has_buffers(page)) {
                                lastoff = page_offset(page);
                                bh = head = page_buffers(page);
                                do {
                                        if (buffer_uptodate(bh) ||
                                            buffer_unwritten(bh)) {
                                                if (whence == SEEK_DATA)
                                                        found = 1;
                                        } else {
                                                if (whence == SEEK_HOLE)
                                                        found = 1;
                                        }
                                        if (found) {
                                                *offset = max_t(loff_t,
                                                        startoff, lastoff);
                                                unlock_page(page);
                                                goto out;
                                        }
                                        lastoff += bh->b_size;
                                        bh = bh->b_this_page;
                                } while (bh != head);
                        }

                        lastoff = page_offset(page) + PAGE_SIZE;
                        unlock_page(page);
                }

                /*
                 * Fewer pages were returned than we asked for, so the
                 * remainder of the range must be a hole.
                 */
                if (nr_pages < num && whence == SEEK_HOLE) {
                        found = 1;
                        *offset = lastoff;
                        break;
                }

                index = pvec.pages[i - 1]->index + 1;
                pagevec_release(&pvec);
        } while (index <= end);

out:
        pagevec_release(&pvec);
        return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_map_blocks map;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t dataoff, isize;
        int blkbits;
        int ret = 0;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);
        if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        dataoff = offset;

        do {
                map.m_lblk = last;
                map.m_len = end - last + 1;
                ret = ext4_map_blocks(NULL, inode, &map, 0);
                if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
                        if (last != start)
                                dataoff = last << blkbits;
                        break;
                }

                /*
                 * If there is a delayed extent at this offset,
                 * treat it as data.
                 */
                ext4_es_find_delayed_extent_range(inode, last, last, &es);
                if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        if (last != start)
                                dataoff = last << blkbits;
                        break;
                }

                /*
                 * If there is an unwritten extent at this offset,
                 * treat it as data or as a hole depending on whether
                 * the page cache has data for it.
                 */
                if (map.m_flags & EXT4_MAP_UNWRITTEN) {
                        int unwritten;
                        unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
                                                              &map, &dataoff);
                        if (unwritten)
                                break;
                }

                last++;
                dataoff = last << blkbits;
        } while (last <= end);

        mutex_unlock(&inode->i_mutex);

        if (dataoff > isize)
                return -ENXIO;

        if (dataoff < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
                return -EINVAL;
        if (dataoff > maxsize)
                return -EINVAL;

        if (dataoff != file->f_pos) {
                file->f_pos = dataoff;
                file->f_version = 0;
        }

        return dataoff;
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_map_blocks map;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t holeoff, isize;
        int blkbits;
        int ret = 0;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);
        if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        holeoff = offset;

        do {
                map.m_lblk = last;
                map.m_len = end - last + 1;
                ret = ext4_map_blocks(NULL, inode, &map, 0);
                if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
                        last += ret;
                        holeoff = last << blkbits;
                        continue;
                }

                /*
                 * If there is a delayed extent at this offset,
                 * skip over it.
                 */
                ext4_es_find_delayed_extent_range(inode, last, last, &es);
                if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        last = es.es_lblk + es.es_len;
                        holeoff = last << blkbits;
                        continue;
                }

                /*
                 * If there is an unwritten extent at this offset,
                 * treat it as data or as a hole depending on whether
                 * the page cache has data for it.
                 */
                if (map.m_flags & EXT4_MAP_UNWRITTEN) {
                        int unwritten;
                        unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
                                                              &map, &holeoff);
                        if (!unwritten) {
                                last += ret;
                                holeoff = last << blkbits;
                                continue;
                        }
                }

                /* found a hole */
                break;
        } while (last <= end);

        mutex_unlock(&inode->i_mutex);

        if (holeoff > isize)
                holeoff = isize;

        if (holeoff < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
                return -EINVAL;
        if (holeoff > maxsize)
                return -EINVAL;

        if (holeoff != file->f_pos) {
                file->f_pos = holeoff;
                file->f_version = 0;
        }

        return holeoff;
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_DATA:
                return ext4_seek_data(file, offset, maxbytes);
        case SEEK_HOLE:
                return ext4_seek_hole(file, offset, maxbytes);
        }

        return -EINVAL;
}
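
/*
 * Illustrative userspace example of how SEEK_DATA/SEEK_HOLE are typically
 * consumed, e.g. by tools that copy sparse files; not part of this file:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);     // first data at or after 0
 *	off_t hole = lseek(fd, data, SEEK_HOLE);  // first hole after that data
 *	// bytes in [data, hole) are data; lseek() returns -1/ENXIO past EOF
 */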

const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = generic_file_aio_read,
        .aio_write      = ext4_file_write,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
        .fallocate      = ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_getattr,
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = ext4_listxattr,
        .removexattr    = generic_removexattr,
        .get_acl        = ext4_get_acl,
        .fiemap         = ext4_fiemap,
};