fs/nfs/pnfs.c
1 /*
2  *  pNFS functions to call and manage layout drivers.
3  *
4  *  Copyright (c) 2002 [year of first publication]
5  *  The Regents of the University of Michigan
6  *  All Rights Reserved
7  *
8  *  Dean Hildebrand <dhildebz@umich.edu>
9  *
10  *  Permission is granted to use, copy, create derivative works, and
11  *  redistribute this software and such derivative works for any purpose,
12  *  so long as the name of the University of Michigan is not used in
13  *  any advertising or publicity pertaining to the use or distribution
14  *  of this software without specific, written prior authorization. If
15  *  the above copyright notice or any other identification of the
16  *  University of Michigan is included in any copy of any portion of
17  *  this software, then the disclaimer below must also be included.
18  *
19  *  This software is provided as is, without representation or warranty
20  *  of any kind either express or implied, including without limitation
21  *  the implied warranties of merchantability, fitness for a particular
22  *  purpose, or noninfringement.  The Regents of the University of
23  *  Michigan shall not be liable for any damages, including special,
24  *  indirect, incidental, or consequential damages, with respect to any
25  *  claim arising out of or in connection with the use of the software,
26  *  even if it has been or is hereafter advised of the possibility of
27  *  such damages.
28  */
29
30 #include <linux/nfs_fs.h>
31 #include <linux/nfs_page.h>
32 #include <linux/module.h>
33 #include "internal.h"
34 #include "pnfs.h"
35 #include "iostat.h"
36
37 #define NFSDBG_FACILITY         NFSDBG_PNFS
38 #define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)
39
40 /* Locking:
41  *
42  * pnfs_spinlock:
43  *      protects pnfs_modules_tbl.
44  */
45 static DEFINE_SPINLOCK(pnfs_spinlock);
46
47 /*
48  * pnfs_modules_tbl holds all pnfs modules
49  */
50 static LIST_HEAD(pnfs_modules_tbl);
51
52 /* Return the registered pnfs layout driver module matching given id */
53 static struct pnfs_layoutdriver_type *
54 find_pnfs_driver_locked(u32 id)
55 {
56         struct pnfs_layoutdriver_type *local;
57
58         list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
59                 if (local->id == id)
60                         goto out;
61         local = NULL;
62 out:
63         dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
64         return local;
65 }
66
67 static struct pnfs_layoutdriver_type *
68 find_pnfs_driver(u32 id)
69 {
70         struct pnfs_layoutdriver_type *local;
71
72         spin_lock(&pnfs_spinlock);
73         local = find_pnfs_driver_locked(id);
74         if (local != NULL && !try_module_get(local->owner)) {
75                 dprintk("%s: Could not grab reference on module\n", __func__);
76                 local = NULL;
77         }
78         spin_unlock(&pnfs_spinlock);
79         return local;
80 }
81
82 void
83 unset_pnfs_layoutdriver(struct nfs_server *nfss)
84 {
85         if (nfss->pnfs_curr_ld) {
86                 if (nfss->pnfs_curr_ld->clear_layoutdriver)
87                         nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
88                 /* Decrement the MDS count. Purge the deviceid cache if zero */
89                 if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
90                         nfs4_deviceid_purge_client(nfss->nfs_client);
91                 module_put(nfss->pnfs_curr_ld->owner);
92         }
93         nfss->pnfs_curr_ld = NULL;
94 }
95
96 /*
97  * Try to set the server's pnfs module to the pnfs layout type specified by id.
98  * Currently only one pNFS layout driver per filesystem is supported.
99  *
100  * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
101  */
102 void
103 set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
104                       u32 id)
105 {
106         struct pnfs_layoutdriver_type *ld_type = NULL;
107
108         if (id == 0)
109                 goto out_no_driver;
110         if (!(server->nfs_client->cl_exchange_flags &
111                  (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
112                 printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
113                         __func__, id, server->nfs_client->cl_exchange_flags);
114                 goto out_no_driver;
115         }
116         ld_type = find_pnfs_driver(id);
117         if (!ld_type) {
118                 request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
119                 ld_type = find_pnfs_driver(id);
120                 if (!ld_type) {
121                         dprintk("%s: No pNFS module found for %u.\n",
122                                 __func__, id);
123                         goto out_no_driver;
124                 }
125         }
126         server->pnfs_curr_ld = ld_type;
127         if (ld_type->set_layoutdriver
128             && ld_type->set_layoutdriver(server, mntfh)) {
129                 printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
130                         "driver %u.\n", __func__, id);
131                 module_put(ld_type->owner);
132                 goto out_no_driver;
133         }
134         /* Bump the MDS count */
135         atomic_inc(&server->nfs_client->cl_mds_count);
136
137         dprintk("%s: pNFS module for %u set\n", __func__, id);
138         return;
139
140 out_no_driver:
141         dprintk("%s: Using NFSv4 I/O\n", __func__);
142         server->pnfs_curr_ld = NULL;
143 }
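/*
 * Module autoload example (illustrative): for the files layout type
 * (LAYOUT_NFSV4_1_FILES, value 1, assuming the usual numbering from
 * RFC 5661), the request_module() call above asks for an alias of the
 * form LAYOUT_NFSV4_1_MODULE_PREFIX "-1", which a layout driver module
 * would declare via MODULE_ALIAS(). If no driver can be found or
 * initialized, control reaches out_no_driver and the mount quietly
 * falls back to ordinary NFSv4 I/O through the MDS.
 */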
144
145 int
146 pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
147 {
148         int status = -EINVAL;
149         struct pnfs_layoutdriver_type *tmp;
150
151         if (ld_type->id == 0) {
152                 printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
153                 return status;
154         }
155         if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
156                 printk(KERN_ERR "NFS: %s Layout driver must provide "
157                        "alloc_lseg and free_lseg.\n", __func__);
158                 return status;
159         }
160
161         spin_lock(&pnfs_spinlock);
162         tmp = find_pnfs_driver_locked(ld_type->id);
163         if (!tmp) {
164                 list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
165                 status = 0;
166                 dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
167                         ld_type->name);
168         } else {
169                 printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
170                         __func__, ld_type->id);
171         }
172         spin_unlock(&pnfs_spinlock);
173
174         return status;
175 }
176 EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
177
178 void
179 pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
180 {
181         dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
182         spin_lock(&pnfs_spinlock);
183         list_del(&ld_type->pnfs_tblid);
184         spin_unlock(&pnfs_spinlock);
185 }
186 EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
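/*
 * Hypothetical usage sketch (not part of this file): a layout driver
 * module typically fills in a struct pnfs_layoutdriver_type and
 * registers it from its module init/exit handlers. The names below are
 * made up for illustration (the files layout type is used as a
 * placeholder id); real drivers also supply alloc_layout_hdr,
 * free_layout_hdr and whatever I/O callbacks they need.
 *
 *	static struct pnfs_layoutdriver_type examplelayout_type = {
 *		.id		= LAYOUT_NFSV4_1_FILES,
 *		.name		= "LAYOUT_EXAMPLE",
 *		.owner		= THIS_MODULE,
 *		.alloc_lseg	= examplelayout_alloc_lseg,
 *		.free_lseg	= examplelayout_free_lseg,
 *	};
 *
 *	static int __init examplelayout_init(void)
 *	{
 *		return pnfs_register_layoutdriver(&examplelayout_type);
 *	}
 *	module_init(examplelayout_init);
 *
 *	static void __exit examplelayout_exit(void)
 *	{
 *		pnfs_unregister_layoutdriver(&examplelayout_type);
 *	}
 *	module_exit(examplelayout_exit);
 */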
187
188 /*
189  * pNFS client layout cache
190  */
191
192 /* Need to hold i_lock if caller does not already hold reference */
193 void
194 pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
195 {
196         atomic_inc(&lo->plh_refcount);
197 }
198
199 static struct pnfs_layout_hdr *
200 pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
201 {
202         struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
203         return ld->alloc_layout_hdr(ino, gfp_flags);
204 }
205
206 static void
207 pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
208 {
209         struct nfs_server *server = NFS_SERVER(lo->plh_inode);
210         struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
211
212         if (!list_empty(&lo->plh_layouts)) {
213                 struct nfs_client *clp = server->nfs_client;
214
215                 spin_lock(&clp->cl_lock);
216                 list_del_init(&lo->plh_layouts);
217                 spin_unlock(&clp->cl_lock);
218         }
219         put_rpccred(lo->plh_lc_cred);
220         return ld->free_layout_hdr(lo);
221 }
222
223 static void
224 pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
225 {
226         struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
227         dprintk("%s: freeing layout cache %p\n", __func__, lo);
228         nfsi->layout = NULL;
229         /* Reset MDS Threshold I/O counters */
230         nfsi->write_io = 0;
231         nfsi->read_io = 0;
232 }
233
234 void
235 pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
236 {
237         struct inode *inode = lo->plh_inode;
238
239         if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
240                 pnfs_detach_layout_hdr(lo);
241                 spin_unlock(&inode->i_lock);
242                 pnfs_free_layout_hdr(lo);
243         }
244 }
245
246 static int
247 pnfs_iomode_to_fail_bit(u32 iomode)
248 {
249         return iomode == IOMODE_RW ?
250                 NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
251 }
252
253 static void
254 pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
255 {
256         lo->plh_retry_timestamp = jiffies;
257         if (!test_and_set_bit(fail_bit, &lo->plh_flags))
258                 atomic_inc(&lo->plh_refcount);
259 }
260
261 static void
262 pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
263 {
264         if (test_and_clear_bit(fail_bit, &lo->plh_flags))
265                 atomic_dec(&lo->plh_refcount);
266 }
267
268 static void
269 pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
270 {
271         struct inode *inode = lo->plh_inode;
272         struct pnfs_layout_range range = {
273                 .iomode = iomode,
274                 .offset = 0,
275                 .length = NFS4_MAX_UINT64,
276         };
277         LIST_HEAD(head);
278
279         spin_lock(&inode->i_lock);
280         pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
281         pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
282         spin_unlock(&inode->i_lock);
283         pnfs_free_lseg_list(&head);
284         dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
285                         iomode == IOMODE_RW ?  "RW" : "READ");
286 }
287
288 static bool
289 pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
290 {
291         unsigned long start, end;
292         int fail_bit = pnfs_iomode_to_fail_bit(iomode);
293
294         if (test_bit(fail_bit, &lo->plh_flags) == 0)
295                 return false;
296         end = jiffies;
297         start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
298         if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
299                 /* It is time to retry the failed layoutgets */
300                 pnfs_layout_clear_fail_bit(lo, fail_bit);
301                 return false;
302         }
303         return true;
304 }
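/*
 * Illustrative timing example (assumed values): with
 * PNFS_LAYOUTGET_RETRY_TIMEOUT of 120*HZ, a fail bit set when
 * jiffies == 1000*HZ keeps pnfs_layout_io_test_failed() returning true
 * until roughly jiffies == 1120*HZ; after that, plh_retry_timestamp
 * falls outside [jiffies - 120*HZ, jiffies], the fail bit is cleared
 * and LAYOUTGET is attempted again for that iomode.
 */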
305
306 static void
307 init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
308 {
309         INIT_LIST_HEAD(&lseg->pls_list);
310         INIT_LIST_HEAD(&lseg->pls_lc_list);
311         atomic_set(&lseg->pls_refcount, 1);
312         smp_mb();
313         set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
314         lseg->pls_layout = lo;
315 }
316
317 static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
318 {
319         struct inode *ino = lseg->pls_layout->plh_inode;
320
321         NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
322 }
323
324 static void
325 pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
326                 struct pnfs_layout_segment *lseg)
327 {
328         struct inode *inode = lo->plh_inode;
329
330         WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
331         list_del_init(&lseg->pls_list);
332         /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
333         atomic_dec(&lo->plh_refcount);
334         if (list_empty(&lo->plh_segs))
335                 clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
336         rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
337 }
338
339 void
340 pnfs_put_lseg(struct pnfs_layout_segment *lseg)
341 {
342         struct pnfs_layout_hdr *lo;
343         struct inode *inode;
344
345         if (!lseg)
346                 return;
347
348         dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
349                 atomic_read(&lseg->pls_refcount),
350                 test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
351         lo = lseg->pls_layout;
352         inode = lo->plh_inode;
353         if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
354                 pnfs_get_layout_hdr(lo);
355                 pnfs_layout_remove_lseg(lo, lseg);
356                 spin_unlock(&inode->i_lock);
357                 pnfs_free_lseg(lseg);
358                 pnfs_put_layout_hdr(lo);
359         }
360 }
361 EXPORT_SYMBOL_GPL(pnfs_put_lseg);
362
363 static inline u64
364 end_offset(u64 start, u64 len)
365 {
366         u64 end;
367
368         end = start + len;
369         return end >= start ? end : NFS4_MAX_UINT64;
370 }
371
372 /*
373  * is l2 fully contained in l1?
374  *   start1                             end1
375  *   [----------------------------------)
376  *           start2           end2
377  *           [----------------)
378  */
379 static inline int
380 lo_seg_contained(struct pnfs_layout_range *l1,
381                  struct pnfs_layout_range *l2)
382 {
383         u64 start1 = l1->offset;
384         u64 end1 = end_offset(start1, l1->length);
385         u64 start2 = l2->offset;
386         u64 end2 = end_offset(start2, l2->length);
387
388         return (start1 <= start2) && (end1 >= end2);
389 }
390
391 /*
392  * are l1 and l2 intersecting?
393  *   start1                             end1
394  *   [----------------------------------)
395  *                              start2           end2
396  *                              [----------------)
397  */
398 static inline int
399 lo_seg_intersecting(struct pnfs_layout_range *l1,
400                     struct pnfs_layout_range *l2)
401 {
402         u64 start1 = l1->offset;
403         u64 end1 = end_offset(start1, l1->length);
404         u64 start2 = l2->offset;
405         u64 end2 = end_offset(start2, l2->length);
406
407         return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
408                (end2 == NFS4_MAX_UINT64 || end2 > start1);
409 }
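/*
 * Worked example (illustrative values only): with
 * l1 = {offset 0, length NFS4_MAX_UINT64} and
 * l2 = {offset 4096, length 8192}, end_offset() clamps l1's end at
 * NFS4_MAX_UINT64, so lo_seg_contained(l1, l2) and
 * lo_seg_intersecting(l1, l2) are both true; with the arguments
 * swapped the ranges still intersect but l1 is not contained in l2.
 */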
410
411 static bool
412 should_free_lseg(struct pnfs_layout_range *lseg_range,
413                  struct pnfs_layout_range *recall_range)
414 {
415         return (recall_range->iomode == IOMODE_ANY ||
416                 lseg_range->iomode == recall_range->iomode) &&
417                lo_seg_intersecting(lseg_range, recall_range);
418 }
419
420 static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
421                 struct list_head *tmp_list)
422 {
423         if (!atomic_dec_and_test(&lseg->pls_refcount))
424                 return false;
425         pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
426         list_add(&lseg->pls_list, tmp_list);
427         return true;
428 }
429
430 /* Returns 1 if lseg is removed from list, 0 otherwise */
431 static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
432                              struct list_head *tmp_list)
433 {
434         int rv = 0;
435
436         if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
437                 /* Remove the reference keeping the lseg in the
438                  * list.  It will now be removed when all
439                  * outstanding io is finished.
440                  */
441                 dprintk("%s: lseg %p ref %d\n", __func__, lseg,
442                         atomic_read(&lseg->pls_refcount));
443                 if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
444                         rv = 1;
445         }
446         return rv;
447 }
448
449 /* Returns the number of matching invalid lsegs remaining in the list
450  * after the call.
451  */
452 int
453 pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
454                             struct list_head *tmp_list,
455                             struct pnfs_layout_range *recall_range)
456 {
457         struct pnfs_layout_segment *lseg, *next;
458         int invalid = 0, removed = 0;
459
460         dprintk("%s:Begin lo %p\n", __func__, lo);
461
462         if (list_empty(&lo->plh_segs))
463                 return 0;
464         list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
465                 if (!recall_range ||
466                     should_free_lseg(&lseg->pls_range, recall_range)) {
467                         dprintk("%s: freeing lseg %p iomode %d "
468                                 "offset %llu length %llu\n", __func__,
469                                 lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
470                                 lseg->pls_range.length);
471                         invalid++;
472                         removed += mark_lseg_invalid(lseg, tmp_list);
473                 }
474         dprintk("%s:Return %i\n", __func__, invalid - removed);
475         return invalid - removed;
476 }
477
478 /* note free_me must contain lsegs from a single layout_hdr */
479 void
480 pnfs_free_lseg_list(struct list_head *free_me)
481 {
482         struct pnfs_layout_segment *lseg, *tmp;
483
484         if (list_empty(free_me))
485                 return;
486
487         list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
488                 list_del(&lseg->pls_list);
489                 pnfs_free_lseg(lseg);
490         }
491 }
492
493 void
494 pnfs_destroy_layout(struct nfs_inode *nfsi)
495 {
496         struct pnfs_layout_hdr *lo;
497         LIST_HEAD(tmp_list);
498
499         spin_lock(&nfsi->vfs_inode.i_lock);
500         lo = nfsi->layout;
501         if (lo) {
502                 lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
503                 pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
504                 pnfs_get_layout_hdr(lo);
505                 pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
506                 pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
507                 spin_unlock(&nfsi->vfs_inode.i_lock);
508                 pnfs_free_lseg_list(&tmp_list);
509                 pnfs_put_layout_hdr(lo);
510         } else
511                 spin_unlock(&nfsi->vfs_inode.i_lock);
512 }
513 EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
514
515 static bool
516 pnfs_layout_add_bulk_destroy_list(struct inode *inode,
517                 struct list_head *layout_list)
518 {
519         struct pnfs_layout_hdr *lo;
520         bool ret = false;
521
522         spin_lock(&inode->i_lock);
523         lo = NFS_I(inode)->layout;
524         if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
525                 pnfs_get_layout_hdr(lo);
526                 list_add(&lo->plh_bulk_destroy, layout_list);
527                 ret = true;
528         }
529         spin_unlock(&inode->i_lock);
530         return ret;
531 }
532
533 /* Caller must hold rcu_read_lock and clp->cl_lock */
534 static int
535 pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
536                 struct nfs_server *server,
537                 struct list_head *layout_list)
538 {
539         struct pnfs_layout_hdr *lo, *next;
540         struct inode *inode;
541
542         list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
543                 inode = igrab(lo->plh_inode);
544                 if (inode == NULL)
545                         continue;
546                 list_del_init(&lo->plh_layouts);
547                 if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
548                         continue;
549                 rcu_read_unlock();
550                 spin_unlock(&clp->cl_lock);
551                 iput(inode);
552                 spin_lock(&clp->cl_lock);
553                 rcu_read_lock();
554                 return -EAGAIN;
555         }
556         return 0;
557 }
558
559 static int
560 pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
561                 bool is_bulk_recall)
562 {
563         struct pnfs_layout_hdr *lo;
564         struct inode *inode;
565         struct pnfs_layout_range range = {
566                 .iomode = IOMODE_ANY,
567                 .offset = 0,
568                 .length = NFS4_MAX_UINT64,
569         };
570         LIST_HEAD(lseg_list);
571         int ret = 0;
572
573         while (!list_empty(layout_list)) {
574                 lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
575                                 plh_bulk_destroy);
576                 dprintk("%s freeing layout for inode %lu\n", __func__,
577                         lo->plh_inode->i_ino);
578                 inode = lo->plh_inode;
579                 spin_lock(&inode->i_lock);
580                 list_del_init(&lo->plh_bulk_destroy);
581                 lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
582                 if (is_bulk_recall)
583                         set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
584                 if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
585                         ret = -EAGAIN;
586                 spin_unlock(&inode->i_lock);
587                 pnfs_free_lseg_list(&lseg_list);
588                 pnfs_put_layout_hdr(lo);
589                 iput(inode);
590         }
591         return ret;
592 }
593
594 int
595 pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
596                 struct nfs_fsid *fsid,
597                 bool is_recall)
598 {
599         struct nfs_server *server;
600         LIST_HEAD(layout_list);
601
602         spin_lock(&clp->cl_lock);
603         rcu_read_lock();
604 restart:
605         list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
606                 if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
607                         continue;
608                 if (pnfs_layout_bulk_destroy_byserver_locked(clp,
609                                 server,
610                                 &layout_list) != 0)
611                         goto restart;
612         }
613         rcu_read_unlock();
614         spin_unlock(&clp->cl_lock);
615
616         if (list_empty(&layout_list))
617                 return 0;
618         return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
619 }
620
621 int
622 pnfs_destroy_layouts_byclid(struct nfs_client *clp,
623                 bool is_recall)
624 {
625         struct nfs_server *server;
626         LIST_HEAD(layout_list);
627
628         spin_lock(&clp->cl_lock);
629         rcu_read_lock();
630 restart:
631         list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
632                 if (pnfs_layout_bulk_destroy_byserver_locked(clp,
633                                         server,
634                                         &layout_list) != 0)
635                         goto restart;
636         }
637         rcu_read_unlock();
638         spin_unlock(&clp->cl_lock);
639
640         if (list_empty(&layout_list))
641                 return 0;
642         return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
643 }
644
645 /*
646  * Called by the state manager to remove all layouts established under an
647  * expired lease.
648  */
649 void
650 pnfs_destroy_all_layouts(struct nfs_client *clp)
651 {
652         nfs4_deviceid_mark_client_invalid(clp);
653         nfs4_deviceid_purge_client(clp);
654
655         pnfs_destroy_layouts_byclid(clp, false);
656 }
657
658 /*
659  * Compare 2 layout stateid sequence ids, to see which is newer,
660  * taking into account wraparound issues.
661  */
662 static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
663 {
664         return (s32)s1 - (s32)s2 > 0;
665 }
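/*
 * Wraparound example: pnfs_seqid_is_newer(1, 0xffffffff) evaluates
 * (s32)1 - (s32)0xffffffff == 1 - (-1) == 2 > 0, so a seqid that has
 * just wrapped past zero is still treated as newer than one near
 * UINT_MAX, where a plain unsigned comparison would give the wrong
 * answer.
 */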
666
667 /* update lo->plh_stateid with new if it is more recent */
668 void
669 pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
670                         bool update_barrier)
671 {
672         u32 oldseq, newseq, new_barrier;
673         int empty = list_empty(&lo->plh_segs);
674
675         oldseq = be32_to_cpu(lo->plh_stateid.seqid);
676         newseq = be32_to_cpu(new->seqid);
677         if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
678                 nfs4_stateid_copy(&lo->plh_stateid, new);
679                 if (update_barrier) {
680                         new_barrier = be32_to_cpu(new->seqid);
681                 } else {
682                         /* Because of wraparound, we want to keep the barrier
683                          * "close" to the current seqids.
684                          */
685                         new_barrier = newseq - atomic_read(&lo->plh_outstanding);
686                 }
687                 if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
688                         lo->plh_barrier = new_barrier;
689         }
690 }
691
692 static bool
693 pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
694                 const nfs4_stateid *stateid)
695 {
696         u32 seqid = be32_to_cpu(stateid->seqid);
697
698         return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
699 }
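/*
 * Barrier example (illustrative numbers): if a reply carries seqid 101
 * while two LAYOUTGETs are outstanding and update_barrier is false,
 * pnfs_set_layout_stateid() sets new_barrier = 101 - 2 = 99; any
 * straggling reply whose stateid seqid is 99 or lower is then reported
 * as blocked here and forgotten by pnfs_layout_process().
 */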
700
701 /* lget is set to 1 if called from inside send_layoutget call chain */
702 static bool
703 pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo, int lget)
704 {
705         return lo->plh_block_lgets ||
706                 test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
707                 (list_empty(&lo->plh_segs) &&
708                  (atomic_read(&lo->plh_outstanding) > lget));
709 }
710
711 int
712 pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
713                               struct nfs4_state *open_state)
714 {
715         int status = 0;
716
717         dprintk("--> %s\n", __func__);
718         spin_lock(&lo->plh_inode->i_lock);
719         if (pnfs_layoutgets_blocked(lo, 1)) {
720                 status = -EAGAIN;
721         } else if (!nfs4_valid_open_stateid(open_state)) {
722                 status = -EBADF;
723         } else if (list_empty(&lo->plh_segs)) {
724                 int seq;
725
726                 do {
727                         seq = read_seqbegin(&open_state->seqlock);
728                         nfs4_stateid_copy(dst, &open_state->stateid);
729                 } while (read_seqretry(&open_state->seqlock, seq));
730         } else
731                 nfs4_stateid_copy(dst, &lo->plh_stateid);
732         spin_unlock(&lo->plh_inode->i_lock);
733         dprintk("<-- %s\n", __func__);
734         return status;
735 }
736
737 /*
738  * Get layout from server.
739  *    for now, assume that whole file layouts are requested.
740  *    arg->offset: 0
741  *    arg->length: all ones
742  */
743 static struct pnfs_layout_segment *
744 send_layoutget(struct pnfs_layout_hdr *lo,
745            struct nfs_open_context *ctx,
746            struct pnfs_layout_range *range,
747            gfp_t gfp_flags)
748 {
749         struct inode *ino = lo->plh_inode;
750         struct nfs_server *server = NFS_SERVER(ino);
751         struct nfs4_layoutget *lgp;
752         struct pnfs_layout_segment *lseg;
753
754         dprintk("--> %s\n", __func__);
755
756         lgp = kzalloc(sizeof(*lgp), gfp_flags);
757         if (lgp == NULL)
758                 return NULL;
759
760         lgp->args.minlength = PAGE_CACHE_SIZE;
761         if (lgp->args.minlength > range->length)
762                 lgp->args.minlength = range->length;
763         lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
764         lgp->args.range = *range;
765         lgp->args.type = server->pnfs_curr_ld->id;
766         lgp->args.inode = ino;
767         lgp->args.ctx = get_nfs_open_context(ctx);
768         lgp->gfp_flags = gfp_flags;
769         lgp->cred = lo->plh_lc_cred;
770
771         /* Synchronously retrieve layout information from server and
772          * store in lseg.
773          */
774         lseg = nfs4_proc_layoutget(lgp, gfp_flags);
775         if (IS_ERR(lseg)) {
776                 switch (PTR_ERR(lseg)) {
777                 case -ENOMEM:
778                 case -ERESTARTSYS:
779                         break;
780                 default:
781                         /* remember that LAYOUTGET failed and suspend trying */
782                         pnfs_layout_io_set_failed(lo, range->iomode);
783                 }
784                 return NULL;
785         }
786
787         return lseg;
788 }
789
790 static void pnfs_clear_layoutcommit(struct inode *inode,
791                 struct list_head *head)
792 {
793         struct nfs_inode *nfsi = NFS_I(inode);
794         struct pnfs_layout_segment *lseg, *tmp;
795
796         if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
797                 return;
798         list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
799                 if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
800                         continue;
801                 pnfs_lseg_dec_and_remove_zero(lseg, head);
802         }
803 }
804
805 /*
806  * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
807  * when the layout segment list is empty.
808  *
809  * Note that a pnfs_layout_hdr can exist with an empty layout segment
810  * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
811  * deviceid is marked invalid.
812  */
813 int
814 _pnfs_return_layout(struct inode *ino)
815 {
816         struct pnfs_layout_hdr *lo = NULL;
817         struct nfs_inode *nfsi = NFS_I(ino);
818         LIST_HEAD(tmp_list);
819         struct nfs4_layoutreturn *lrp;
820         nfs4_stateid stateid;
821         int status = 0, empty;
822
823         dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);
824
825         spin_lock(&ino->i_lock);
826         lo = nfsi->layout;
827         if (!lo) {
828                 spin_unlock(&ino->i_lock);
829                 dprintk("NFS: %s no layout to return\n", __func__);
830                 goto out;
831         }
832         stateid = nfsi->layout->plh_stateid;
833         /* Reference matched in nfs4_layoutreturn_release */
834         pnfs_get_layout_hdr(lo);
835         empty = list_empty(&lo->plh_segs);
836         pnfs_clear_layoutcommit(ino, &tmp_list);
837         pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
838         /* Don't send a LAYOUTRETURN if list was initially empty */
839         if (empty) {
840                 spin_unlock(&ino->i_lock);
841                 pnfs_put_layout_hdr(lo);
842                 dprintk("NFS: %s no layout segments to return\n", __func__);
843                 goto out;
844         }
845         lo->plh_block_lgets++;
846         spin_unlock(&ino->i_lock);
847         pnfs_free_lseg_list(&tmp_list);
848
849         lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
850         if (unlikely(lrp == NULL)) {
851                 status = -ENOMEM;
852                 spin_lock(&ino->i_lock);
853                 lo->plh_block_lgets--;
854                 spin_unlock(&ino->i_lock);
855                 pnfs_put_layout_hdr(lo);
856                 goto out;
857         }
858
859         lrp->args.stateid = stateid;
860         lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
861         lrp->args.inode = ino;
862         lrp->args.layout = lo;
863         lrp->clp = NFS_SERVER(ino)->nfs_client;
864
865         status = nfs4_proc_layoutreturn(lrp);
866 out:
867         dprintk("<-- %s status: %d\n", __func__, status);
868         return status;
869 }
870 EXPORT_SYMBOL_GPL(_pnfs_return_layout);
871
872 int
873 pnfs_commit_and_return_layout(struct inode *inode)
874 {
875         struct pnfs_layout_hdr *lo;
876         int ret;
877
878         spin_lock(&inode->i_lock);
879         lo = NFS_I(inode)->layout;
880         if (lo == NULL) {
881                 spin_unlock(&inode->i_lock);
882                 return 0;
883         }
884         pnfs_get_layout_hdr(lo);
885         /* Block new layoutgets and read/write to ds */
886         lo->plh_block_lgets++;
887         spin_unlock(&inode->i_lock);
888         filemap_fdatawait(inode->i_mapping);
889         ret = pnfs_layoutcommit_inode(inode, true);
890         if (ret == 0)
891                 ret = _pnfs_return_layout(inode);
892         spin_lock(&inode->i_lock);
893         lo->plh_block_lgets--;
894         spin_unlock(&inode->i_lock);
895         pnfs_put_layout_hdr(lo);
896         return ret;
897 }
898
899 bool pnfs_roc(struct inode *ino)
900 {
901         struct pnfs_layout_hdr *lo;
902         struct pnfs_layout_segment *lseg, *tmp;
903         LIST_HEAD(tmp_list);
904         bool found = false;
905
906         spin_lock(&ino->i_lock);
907         lo = NFS_I(ino)->layout;
908         if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
909             test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
910                 goto out_nolayout;
911         list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
912                 if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
913                         mark_lseg_invalid(lseg, &tmp_list);
914                         found = true;
915                 }
916         if (!found)
917                 goto out_nolayout;
918         lo->plh_block_lgets++;
919         pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
920         spin_unlock(&ino->i_lock);
921         pnfs_free_lseg_list(&tmp_list);
922         return true;
923
924 out_nolayout:
925         spin_unlock(&ino->i_lock);
926         return false;
927 }
928
929 void pnfs_roc_release(struct inode *ino)
930 {
931         struct pnfs_layout_hdr *lo;
932
933         spin_lock(&ino->i_lock);
934         lo = NFS_I(ino)->layout;
935         lo->plh_block_lgets--;
936         if (atomic_dec_and_test(&lo->plh_refcount)) {
937                 pnfs_detach_layout_hdr(lo);
938                 spin_unlock(&ino->i_lock);
939                 pnfs_free_layout_hdr(lo);
940         } else
941                 spin_unlock(&ino->i_lock);
942 }
943
944 void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
945 {
946         struct pnfs_layout_hdr *lo;
947
948         spin_lock(&ino->i_lock);
949         lo = NFS_I(ino)->layout;
950         if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
951                 lo->plh_barrier = barrier;
952         spin_unlock(&ino->i_lock);
953 }
954
955 bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
956 {
957         struct nfs_inode *nfsi = NFS_I(ino);
958         struct pnfs_layout_hdr *lo;
959         struct pnfs_layout_segment *lseg;
960         u32 current_seqid;
961         bool found = false;
962
963         spin_lock(&ino->i_lock);
964         list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
965                 if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
966                         rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
967                         found = true;
968                         goto out;
969                 }
970         lo = nfsi->layout;
971         current_seqid = be32_to_cpu(lo->plh_stateid.seqid);
972
973         /* Since close does not return a layout stateid for use as
974          * a barrier, we choose the worst-case barrier.
975          */
976         *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
977 out:
978         spin_unlock(&ino->i_lock);
979         return found;
980 }
981
982 /*
983  * Compare two layout segments for sorting into layout cache.
984  * We want to preferentially return RW over RO layouts, so ensure those
985  * are seen first.
986  */
987 static s64
988 cmp_layout(struct pnfs_layout_range *l1,
989            struct pnfs_layout_range *l2)
990 {
991         s64 d;
992
993         /* high offset > low offset */
994         d = l1->offset - l2->offset;
995         if (d)
996                 return d;
997
998         /* short length > long length */
999         d = l2->length - l1->length;
1000         if (d)
1001                 return d;
1002
1003         /* read > read/write */
1004         return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
1005 }
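/*
 * Ordering example (illustrative values): with equal offsets, an RW
 * lseg A = {IOMODE_RW, offset 0, length 1048576} and a READ lseg
 * B = {IOMODE_READ, offset 0, length 4096} give cmp_layout(&A, &B) < 0
 * on the length test (longer segments sort first), so A is inserted
 * ahead of B; the iomode tie-break (RW before READ) only matters when
 * offset and length are equal.
 */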
1006
1007 static void
1008 pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
1009                    struct pnfs_layout_segment *lseg)
1010 {
1011         struct pnfs_layout_segment *lp;
1012
1013         dprintk("%s:Begin\n", __func__);
1014
1015         list_for_each_entry(lp, &lo->plh_segs, pls_list) {
1016                 if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
1017                         continue;
1018                 list_add_tail(&lseg->pls_list, &lp->pls_list);
1019                 dprintk("%s: inserted lseg %p "
1020                         "iomode %d offset %llu length %llu before "
1021                         "lp %p iomode %d offset %llu length %llu\n",
1022                         __func__, lseg, lseg->pls_range.iomode,
1023                         lseg->pls_range.offset, lseg->pls_range.length,
1024                         lp, lp->pls_range.iomode, lp->pls_range.offset,
1025                         lp->pls_range.length);
1026                 goto out;
1027         }
1028         list_add_tail(&lseg->pls_list, &lo->plh_segs);
1029         dprintk("%s: inserted lseg %p "
1030                 "iomode %d offset %llu length %llu at tail\n",
1031                 __func__, lseg, lseg->pls_range.iomode,
1032                 lseg->pls_range.offset, lseg->pls_range.length);
1033 out:
1034         pnfs_get_layout_hdr(lo);
1035
1036         dprintk("%s:Return\n", __func__);
1037 }
1038
1039 static struct pnfs_layout_hdr *
1040 alloc_init_layout_hdr(struct inode *ino,
1041                       struct nfs_open_context *ctx,
1042                       gfp_t gfp_flags)
1043 {
1044         struct pnfs_layout_hdr *lo;
1045
1046         lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
1047         if (!lo)
1048                 return NULL;
1049         atomic_set(&lo->plh_refcount, 1);
1050         INIT_LIST_HEAD(&lo->plh_layouts);
1051         INIT_LIST_HEAD(&lo->plh_segs);
1052         INIT_LIST_HEAD(&lo->plh_bulk_destroy);
1053         lo->plh_inode = ino;
1054         lo->plh_lc_cred = get_rpccred(ctx->state->owner->so_cred);
1055         return lo;
1056 }
1057
1058 static struct pnfs_layout_hdr *
1059 pnfs_find_alloc_layout(struct inode *ino,
1060                        struct nfs_open_context *ctx,
1061                        gfp_t gfp_flags)
1062 {
1063         struct nfs_inode *nfsi = NFS_I(ino);
1064         struct pnfs_layout_hdr *new = NULL;
1065
1066         dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
1067
1068         if (nfsi->layout != NULL)
1069                 goto out_existing;
1070         spin_unlock(&ino->i_lock);
1071         new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
1072         spin_lock(&ino->i_lock);
1073
1074         if (likely(nfsi->layout == NULL)) {     /* Won the race? */
1075                 nfsi->layout = new;
1076                 return new;
1077         } else if (new != NULL)
1078                 pnfs_free_layout_hdr(new);
1079 out_existing:
1080         pnfs_get_layout_hdr(nfsi->layout);
1081         return nfsi->layout;
1082 }
1083
1084 /*
1085  * iomode matching rules:
1086  * iomode       lseg    match
1087  * -----        -----   -----
1088  * ANY          READ    true
1089  * ANY          RW      true
1090  * RW           READ    false
1091  * RW           RW      true
1092  * READ         READ    true
1093  * READ         RW      true
1094  */
1095 static int
1096 is_matching_lseg(struct pnfs_layout_range *ls_range,
1097                  struct pnfs_layout_range *range)
1098 {
1099         struct pnfs_layout_range range1;
1100
1101         if ((range->iomode == IOMODE_RW &&
1102              ls_range->iomode != IOMODE_RW) ||
1103             !lo_seg_intersecting(ls_range, range))
1104                 return 0;
1105
1106         /* range1 covers only the first byte in the range */
1107         range1 = *range;
1108         range1.length = 1;
1109         return lo_seg_contained(ls_range, &range1);
1110 }
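/*
 * Matching example (illustrative): a cached IOMODE_READ lseg covering
 * {0, 1 MB} never satisfies an IOMODE_RW request, but an IOMODE_RW
 * lseg covering {0, 1 MB} satisfies a READ request for {4096, 8192};
 * range1 shrinks the request to its first byte, so only the start of
 * the requested range needs to lie inside the cached segment.
 */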
1111
1112 /*
1113  * lookup range in layout
1114  */
1115 static struct pnfs_layout_segment *
1116 pnfs_find_lseg(struct pnfs_layout_hdr *lo,
1117                 struct pnfs_layout_range *range)
1118 {
1119         struct pnfs_layout_segment *lseg, *ret = NULL;
1120
1121         dprintk("%s:Begin\n", __func__);
1122
1123         list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
1124                 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
1125                     is_matching_lseg(&lseg->pls_range, range)) {
1126                         ret = pnfs_get_lseg(lseg);
1127                         break;
1128                 }
1129                 if (lseg->pls_range.offset > range->offset)
1130                         break;
1131         }
1132
1133         dprintk("%s:Return lseg %p ref %d\n",
1134                 __func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
1135         return ret;
1136 }
1137
1138 /*
1139  * Use mdsthreshold hints set at each OPEN to determine if I/O should go
1140  * to the MDS or over pNFS
1141  *
1142  * The nfs_inode read_io and write_io fields are cumulative counters reset
1143  * when there are no layout segments. Note that in pnfs_update_layout iomode
1144  * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
1145  * WRITE request.
1146  *
1147  * A return of true means use MDS I/O.
1148  *
1149  * From rfc 5661:
1150  * If a file's size is smaller than the file size threshold, data accesses
1151  * SHOULD be sent to the metadata server.  If an I/O request has a length that
1152  * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
1153  * server.  If both file size and I/O size are provided, the client SHOULD
1154  * reach or exceed  both thresholds before sending its read or write
1155  * reach or exceed both thresholds before sending its read or write
1156  */
1157 static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
1158                                      struct inode *ino, int iomode)
1159 {
1160         struct nfs4_threshold *t = ctx->mdsthreshold;
1161         struct nfs_inode *nfsi = NFS_I(ino);
1162         loff_t fsize = i_size_read(ino);
1163         bool size = false, size_set = false, io = false, io_set = false, ret = false;
1164
1165         if (t == NULL)
1166                 return ret;
1167
1168         dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
1169                 __func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);
1170
1171         switch (iomode) {
1172         case IOMODE_READ:
1173                 if (t->bm & THRESHOLD_RD) {
1174                         dprintk("%s fsize %llu\n", __func__, fsize);
1175                         size_set = true;
1176                         if (fsize < t->rd_sz)
1177                                 size = true;
1178                 }
1179                 if (t->bm & THRESHOLD_RD_IO) {
1180                         dprintk("%s nfsi->read_io %llu\n", __func__,
1181                                 nfsi->read_io);
1182                         io_set = true;
1183                         if (nfsi->read_io < t->rd_io_sz)
1184                                 io = true;
1185                 }
1186                 break;
1187         case IOMODE_RW:
1188                 if (t->bm & THRESHOLD_WR) {
1189                         dprintk("%s fsize %llu\n", __func__, fsize);
1190                         size_set = true;
1191                         if (fsize < t->wr_sz)
1192                                 size = true;
1193                 }
1194                 if (t->bm & THRESHOLD_WR_IO) {
1195                         dprintk("%s nfsi->write_io %llu\n", __func__,
1196                                 nfsi->write_io);
1197                         io_set = true;
1198                         if (nfsi->write_io < t->wr_io_sz)
1199                                 io = true;
1200                 }
1201                 break;
1202         }
1203         if (size_set && io_set) {
1204                 if (size && io)
1205                         ret = true;
1206         } else if (size || io)
1207                 ret = true;
1208
1209         dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
1210         return ret;
1211 }
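/*
 * Worked example (assumed threshold values): if the server advertised
 * THRESHOLD_RD with rd_sz = 65536 and THRESHOLD_RD_IO with
 * rd_io_sz = 32768, a read against a 16 KB file while cumulative
 * nfsi->read_io is still below 32768 has both tests true, so this
 * returns true and the I/O goes to the MDS; once read_io reaches
 * 32768 the io test fails and pNFS is used again.
 */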
1212
1213 /*
1214  * Layout segment is retrieved from the server if not cached.
1215  * The appropriate layout segment is referenced and returned to the caller.
1216  */
1217 struct pnfs_layout_segment *
1218 pnfs_update_layout(struct inode *ino,
1219                    struct nfs_open_context *ctx,
1220                    loff_t pos,
1221                    u64 count,
1222                    enum pnfs_iomode iomode,
1223                    gfp_t gfp_flags)
1224 {
1225         struct pnfs_layout_range arg = {
1226                 .iomode = iomode,
1227                 .offset = pos,
1228                 .length = count,
1229         };
1230         unsigned pg_offset;
1231         struct nfs_server *server = NFS_SERVER(ino);
1232         struct nfs_client *clp = server->nfs_client;
1233         struct pnfs_layout_hdr *lo;
1234         struct pnfs_layout_segment *lseg = NULL;
1235         bool first;
1236
1237         if (!pnfs_enabled_sb(NFS_SERVER(ino)))
1238                 goto out;
1239
1240         if (pnfs_within_mdsthreshold(ctx, ino, iomode))
1241                 goto out;
1242
1243         spin_lock(&ino->i_lock);
1244         lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
1245         if (lo == NULL) {
1246                 spin_unlock(&ino->i_lock);
1247                 goto out;
1248         }
1249
1250         /* Do we even need to bother with this? */
1251         if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
1252                 dprintk("%s matches recall, use MDS\n", __func__);
1253                 goto out_unlock;
1254         }
1255
1256         /* if LAYOUTGET already failed once we don't try again */
1257         if (pnfs_layout_io_test_failed(lo, iomode))
1258                 goto out_unlock;
1259
1260         /* Check to see if the layout for the given range already exists */
1261         lseg = pnfs_find_lseg(lo, &arg);
1262         if (lseg)
1263                 goto out_unlock;
1264
1265         if (pnfs_layoutgets_blocked(lo, 0))
1266                 goto out_unlock;
1267         atomic_inc(&lo->plh_outstanding);
1268
1269         first = list_empty(&lo->plh_layouts) ? true : false;
1270         spin_unlock(&ino->i_lock);
1271
1272         if (first) {
1273                 /* The lo must be on the clp list if there is any
1274                  * chance of a CB_LAYOUTRECALL(FILE) coming in.
1275                  */
1276                 spin_lock(&clp->cl_lock);
1277                 list_add_tail(&lo->plh_layouts, &server->layouts);
1278                 spin_unlock(&clp->cl_lock);
1279         }
1280
1281         pg_offset = arg.offset & ~PAGE_CACHE_MASK;
1282         if (pg_offset) {
1283                 arg.offset -= pg_offset;
1284                 arg.length += pg_offset;
1285         }
1286         if (arg.length != NFS4_MAX_UINT64)
1287                 arg.length = PAGE_CACHE_ALIGN(arg.length);
1288
1289         lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
1290         atomic_dec(&lo->plh_outstanding);
1291 out_put_layout_hdr:
1292         pnfs_put_layout_hdr(lo);
1293 out:
1294         dprintk("%s: inode %s/%llu pNFS layout segment %s for "
1295                         "(%s, offset: %llu, length: %llu)\n",
1296                         __func__, ino->i_sb->s_id,
1297                         (unsigned long long)NFS_FILEID(ino),
1298                         lseg == NULL ? "not found" : "found",
1299                         iomode==IOMODE_RW ?  "read/write" : "read-only",
1300                         (unsigned long long)pos,
1301                         (unsigned long long)count);
1302         return lseg;
1303 out_unlock:
1304         spin_unlock(&ino->i_lock);
1305         goto out_put_layout_hdr;
1306 }
1307 EXPORT_SYMBOL_GPL(pnfs_update_layout);
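/*
 * Alignment example (assuming a 4 KB page size): a request at
 * pos = 5000 for count = 100 gives pg_offset = 904, so the LAYOUTGET
 * range sent to the server becomes offset 4096, length 1004 rounded up
 * to 4096; the layout is always requested on whole-page boundaries
 * even when the caller's I/O is not.
 */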
1308
1309 struct pnfs_layout_segment *
1310 pnfs_layout_process(struct nfs4_layoutget *lgp)
1311 {
1312         struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
1313         struct nfs4_layoutget_res *res = &lgp->res;
1314         struct pnfs_layout_segment *lseg;
1315         struct inode *ino = lo->plh_inode;
1316         int status = 0;
1317
1318         /* Inject layout blob into I/O device driver */
1319         lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
1320         if (!lseg || IS_ERR(lseg)) {
1321                 if (!lseg)
1322                         status = -ENOMEM;
1323                 else
1324                         status = PTR_ERR(lseg);
1325                 dprintk("%s: Could not allocate layout: error %d\n",
1326                        __func__, status);
1327                 goto out;
1328         }
1329
1330         spin_lock(&ino->i_lock);
1331         if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
1332                 dprintk("%s forget reply due to recall\n", __func__);
1333                 goto out_forget_reply;
1334         }
1335
1336         if (pnfs_layoutgets_blocked(lo, 1) ||
1337             pnfs_layout_stateid_blocked(lo, &res->stateid)) {
1338                 dprintk("%s forget reply due to state\n", __func__);
1339                 goto out_forget_reply;
1340         }
1341
1342         /* Done processing layoutget. Set the layout stateid */
1343         pnfs_set_layout_stateid(lo, &res->stateid, false);
1344
1345         init_lseg(lo, lseg);
1346         lseg->pls_range = res->range;
1347         pnfs_get_lseg(lseg);
1348         pnfs_layout_insert_lseg(lo, lseg);
1349
1350         if (res->return_on_close) {
1351                 set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
1352                 set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
1353         }
1354
1355         spin_unlock(&ino->i_lock);
1356         return lseg;
1357 out:
1358         return ERR_PTR(status);
1359
1360 out_forget_reply:
1361         spin_unlock(&ino->i_lock);
1362         lseg->pls_layout = lo;
1363         NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
1364         goto out;
1365 }
1366
1367 void
1368 pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
1369 {
1370         u64 rd_size = req->wb_bytes;
1371
1372         WARN_ON_ONCE(pgio->pg_lseg != NULL);
1373
1374         if (req->wb_offset != req->wb_pgbase) {
1375                 nfs_pageio_reset_read_mds(pgio);
1376                 return;
1377         }
1378
1379         if (pgio->pg_dreq == NULL)
1380                 rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
1381         else
1382                 rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
1383
1384         pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1385                                            req->wb_context,
1386                                            req_offset(req),
1387                                            rd_size,
1388                                            IOMODE_READ,
1389                                            GFP_KERNEL);
1390         /* If no lseg, fall back to read through mds */
1391         if (pgio->pg_lseg == NULL)
1392                 nfs_pageio_reset_read_mds(pgio);
1393
1394 }
1395 EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
1396
1397 void
1398 pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
1399                            struct nfs_page *req, u64 wb_size)
1400 {
1401         WARN_ON_ONCE(pgio->pg_lseg != NULL);
1402
1403         if (req->wb_offset != req->wb_pgbase) {
1404                 nfs_pageio_reset_write_mds(pgio);
1405                 return;
1406         }
1407
1408         pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1409                                            req->wb_context,
1410                                            req_offset(req),
1411                                            wb_size,
1412                                            IOMODE_RW,
1413                                            GFP_NOFS);
1414         /* If no lseg, fall back to write through mds */
1415         if (pgio->pg_lseg == NULL)
1416                 nfs_pageio_reset_write_mds(pgio);
1417 }
1418 EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
1419
1420 void
1421 pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
1422                       const struct nfs_pgio_completion_ops *compl_ops)
1423 {
1424         struct nfs_server *server = NFS_SERVER(inode);
1425         struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
1426
1427         if (ld == NULL)
1428                 nfs_pageio_init_read(pgio, inode, compl_ops);
1429         else
1430                 nfs_pageio_init(pgio, inode, ld->pg_read_ops, compl_ops, server->rsize, 0);
1431 }
1432
1433 void
1434 pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode,
1435                        int ioflags,
1436                        const struct nfs_pgio_completion_ops *compl_ops)
1437 {
1438         struct nfs_server *server = NFS_SERVER(inode);
1439         struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
1440
1441         if (ld == NULL)
1442                 nfs_pageio_init_write(pgio, inode, ioflags, compl_ops);
1443         else
1444                 nfs_pageio_init(pgio, inode, ld->pg_write_ops, compl_ops, server->wsize, ioflags);
1445 }
1446
1447 bool
1448 pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
1449                      struct nfs_page *req)
1450 {
1451         if (pgio->pg_lseg == NULL)
1452                 return nfs_generic_pg_test(pgio, prev, req);
1453
1454         /*
1455          * Test if a nfs_page is fully contained in the pnfs_layout_range.
1456          * Note that this test makes several assumptions:
1457          * - that the previous nfs_page in the struct nfs_pageio_descriptor
1458          *   is known to lie within the range.
1459          *   - that the nfs_page being tested is known to be contiguous with the
1460          *   previous nfs_page.
1461          *   - Layout ranges are page aligned, so we only have to test the
1462          *   start offset of the request.
1463          *
1464          * Please also note that 'end_offset' is actually the offset of the
1465          * first byte that lies outside the pnfs_layout_range. FIXME?
1466          *
1467          */
1468         return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
1469                                          pgio->pg_lseg->pls_range.length);
1470 }
1471 EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
1472
1473 int pnfs_write_done_resend_to_mds(struct inode *inode,
1474                                 struct list_head *head,
1475                                 const struct nfs_pgio_completion_ops *compl_ops,
1476                                 struct nfs_direct_req *dreq)
1477 {
1478         struct nfs_pageio_descriptor pgio;
1479         LIST_HEAD(failed);
1480
1481         /* Resend all requests through the MDS */
1482         nfs_pageio_init_write(&pgio, inode, FLUSH_STABLE, compl_ops);
1483         pgio.pg_dreq = dreq;
1484         while (!list_empty(head)) {
1485                 struct nfs_page *req = nfs_list_entry(head->next);
1486
1487                 nfs_list_remove_request(req);
1488                 if (!nfs_pageio_add_request(&pgio, req))
1489                         nfs_list_add_request(req, &failed);
1490         }
1491         nfs_pageio_complete(&pgio);
1492
1493         if (!list_empty(&failed)) {
1494                 /* For some reason our attempt to resend pages failed. Mark the
1495                  * overall send request as having failed, and let
1496                  * nfs_writeback_release_full deal with the error.
1497                  */
1498                 list_move(&failed, head);
1499                 return -EIO;
1500         }
1501         return 0;
1502 }
1503 EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
1504
1505 static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
1506 {
1507         struct nfs_pgio_header *hdr = data->header;
1508
1509         dprintk("pnfs write error = %d\n", hdr->pnfs_error);
1510         if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
1511             PNFS_LAYOUTRET_ON_ERROR) {
1512                 pnfs_return_layout(hdr->inode);
1513         }
1514         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
1515                 data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
1516                                                         &hdr->pages,
1517                                                         hdr->completion_ops,
1518                                                         hdr->dreq);
1519 }
1520
1521 /*
1522  * Called by non-RPC-based layout drivers
1523  */
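/*
 * A minimal sketch of the expected call site (hypothetical driver code, not
 * part of this file): such a driver records any I/O error in
 * hdr->pnfs_error before handing the result back, e.g.
 *
 *	data->header->pnfs_error = status;	// 0 on success
 *	pnfs_ld_write_done(data);
 */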
1524 void pnfs_ld_write_done(struct nfs_write_data *data)
1525 {
1526         struct nfs_pgio_header *hdr = data->header;
1527
1528         if (!hdr->pnfs_error) {
1529                 pnfs_set_layoutcommit(data);
1530                 hdr->mds_ops->rpc_call_done(&data->task, data);
1531         } else
1532                 pnfs_ld_handle_write_error(data);
1533         hdr->mds_ops->rpc_release(data);
1534 }
1535 EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
1536
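/*
 * Hand a write that the layout driver did not attempt back to the
 * descriptor: splice its pages back onto pg_list, reset the descriptor to
 * the MDS write path and mark it for recoalescing.  NFS_IOHDR_REDO ensures
 * this is only done once per header.
 */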
1537 static void
1538 pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
1539                 struct nfs_write_data *data)
1540 {
1541         struct nfs_pgio_header *hdr = data->header;
1542
1543         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1544                 list_splice_tail_init(&hdr->pages, &desc->pg_list);
1545                 nfs_pageio_reset_write_mds(desc);
1546                 desc->pg_recoalesce = 1;
1547         }
1548         nfs_writedata_release(data);
1549 }
1550
1551 static enum pnfs_try_status
1552 pnfs_try_to_write_data(struct nfs_write_data *wdata,
1553                         const struct rpc_call_ops *call_ops,
1554                         struct pnfs_layout_segment *lseg,
1555                         int how)
1556 {
1557         struct nfs_pgio_header *hdr = wdata->header;
1558         struct inode *inode = hdr->inode;
1559         enum pnfs_try_status trypnfs;
1560         struct nfs_server *nfss = NFS_SERVER(inode);
1561
1562         hdr->mds_ops = call_ops;
1563
1564         dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
1565                 inode->i_ino, wdata->args.count, wdata->args.offset, how);
1566         trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
1567         if (trypnfs != PNFS_NOT_ATTEMPTED)
1568                 nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
1569         dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
1570         return trypnfs;
1571 }
1572
1573 static void
1574 pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *head, int how)
1575 {
1576         struct nfs_write_data *data;
1577         const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
1578         struct pnfs_layout_segment *lseg = desc->pg_lseg;
1579
1580         desc->pg_lseg = NULL;
1581         while (!list_empty(head)) {
1582                 enum pnfs_try_status trypnfs;
1583
1584                 data = list_first_entry(head, struct nfs_write_data, list);
1585                 list_del_init(&data->list);
1586
1587                 trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
1588                 if (trypnfs == PNFS_NOT_ATTEMPTED)
1589                         pnfs_write_through_mds(desc, data);
1590         }
1591         pnfs_put_lseg(lseg);
1592 }
1593
1594 static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
1595 {
1596         pnfs_put_lseg(hdr->lseg);
1597         nfs_writehdr_free(hdr);
1598 }
1599 EXPORT_SYMBOL_GPL(pnfs_writehdr_free);
1600
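/*
 * Generic ->pg_doio for pNFS writes: allocate a write header, pin the
 * descriptor's layout segment, let nfs_generic_flush() build the RPC list,
 * then try each nfs_write_data through the layout driver, falling back to
 * the MDS for anything the driver declines.
 */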
1601 int
1602 pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
1603 {
1604         struct nfs_write_header *whdr;
1605         struct nfs_pgio_header *hdr;
1606         int ret;
1607
1608         whdr = nfs_writehdr_alloc();
1609         if (!whdr) {
1610                 desc->pg_completion_ops->error_cleanup(&desc->pg_list);
1611                 pnfs_put_lseg(desc->pg_lseg);
1612                 desc->pg_lseg = NULL;
1613                 return -ENOMEM;
1614         }
1615         hdr = &whdr->header;
1616         nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
1617         hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
1618         atomic_inc(&hdr->refcnt);
1619         ret = nfs_generic_flush(desc, hdr);
1620         if (ret != 0) {
1621                 pnfs_put_lseg(desc->pg_lseg);
1622                 desc->pg_lseg = NULL;
1623         } else
1624                 pnfs_do_multiple_writes(desc, &hdr->rpc_list, desc->pg_ioflags);
1625         if (atomic_dec_and_test(&hdr->refcnt))
1626                 hdr->completion_ops->completion(hdr);
1627         return ret;
1628 }
1629 EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
1630
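/*
 * Read-side counterpart of pnfs_write_done_resend_to_mds(): resend the
 * requests on 'head' through the MDS, moving anything that cannot be
 * re-queued back onto 'head' and returning -EIO.
 */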
1631 int pnfs_read_done_resend_to_mds(struct inode *inode,
1632                                 struct list_head *head,
1633                                 const struct nfs_pgio_completion_ops *compl_ops,
1634                                 struct nfs_direct_req *dreq)
1635 {
1636         struct nfs_pageio_descriptor pgio;
1637         LIST_HEAD(failed);
1638
1639         /* Resend all requests through the MDS */
1640         nfs_pageio_init_read(&pgio, inode, compl_ops);
1641         pgio.pg_dreq = dreq;
1642         while (!list_empty(head)) {
1643                 struct nfs_page *req = nfs_list_entry(head->next);
1644
1645                 nfs_list_remove_request(req);
1646                 if (!nfs_pageio_add_request(&pgio, req))
1647                         nfs_list_add_request(req, &failed);
1648         }
1649         nfs_pageio_complete(&pgio);
1650
1651         if (!list_empty(&failed)) {
1652                 list_move(&failed, head);
1653                 return -EIO;
1654         }
1655         return 0;
1656 }
1657 EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
1658
1659 static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
1660 {
1661         struct nfs_pgio_header *hdr = data->header;
1662
1663         dprintk("pnfs read error = %d\n", hdr->pnfs_error);
1664         if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
1665             PNFS_LAYOUTRET_ON_ERROR) {
1666                 pnfs_return_layout(hdr->inode);
1667         }
1668         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
1669                 data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
1670                                                         &hdr->pages,
1671                                                         hdr->completion_ops,
1672                                                         hdr->dreq);
1673 }
1674
1675 /*
1676  * Called by non-RPC-based layout drivers
1677  */
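/*
 * As on the write side, a sketch of the expected (hypothetical) driver call
 * site: set hdr->pnfs_error on failure, then call pnfs_ld_read_done(data)
 * once the driver's I/O completes.
 */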
1678 void pnfs_ld_read_done(struct nfs_read_data *data)
1679 {
1680         struct nfs_pgio_header *hdr = data->header;
1681
1682         if (likely(!hdr->pnfs_error)) {
1683                 __nfs4_read_done_cb(data);
1684                 hdr->mds_ops->rpc_call_done(&data->task, data);
1685         } else
1686                 pnfs_ld_handle_read_error(data);
1687         hdr->mds_ops->rpc_release(data);
1688 }
1689 EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
1690
1691 static void
1692 pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
1693                 struct nfs_read_data *data)
1694 {
1695         struct nfs_pgio_header *hdr = data->header;
1696
1697         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1698                 list_splice_tail_init(&hdr->pages, &desc->pg_list);
1699                 nfs_pageio_reset_read_mds(desc);
1700                 desc->pg_recoalesce = 1;
1701         }
1702         nfs_readdata_release(data);
1703 }
1704
1705 /*
1706  * Call the appropriate parallel I/O subsystem read function.
1707  */
1708 static enum pnfs_try_status
1709 pnfs_try_to_read_data(struct nfs_read_data *rdata,
1710                        const struct rpc_call_ops *call_ops,
1711                        struct pnfs_layout_segment *lseg)
1712 {
1713         struct nfs_pgio_header *hdr = rdata->header;
1714         struct inode *inode = hdr->inode;
1715         struct nfs_server *nfss = NFS_SERVER(inode);
1716         enum pnfs_try_status trypnfs;
1717
1718         hdr->mds_ops = call_ops;
1719
1720         dprintk("%s: Reading ino:%lu %u@%llu\n",
1721                 __func__, inode->i_ino, rdata->args.count, rdata->args.offset);
1722
1723         trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
1724         if (trypnfs != PNFS_NOT_ATTEMPTED)
1725                 nfs_inc_stats(inode, NFSIOS_PNFS_READ);
1726         dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
1727         return trypnfs;
1728 }
1729
1730 static void
1731 pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *head)
1732 {
1733         struct nfs_read_data *data;
1734         const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
1735         struct pnfs_layout_segment *lseg = desc->pg_lseg;
1736
1737         desc->pg_lseg = NULL;
1738         while (!list_empty(head)) {
1739                 enum pnfs_try_status trypnfs;
1740
1741                 data = list_first_entry(head, struct nfs_read_data, list);
1742                 list_del_init(&data->list);
1743
1744                 trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
1745                 if (trypnfs == PNFS_NOT_ATTEMPTED)
1746                         pnfs_read_through_mds(desc, data);
1747         }
1748         pnfs_put_lseg(lseg);
1749 }
1750
1751 static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
1752 {
1753         pnfs_put_lseg(hdr->lseg);
1754         nfs_readhdr_free(hdr);
1755 }
1756 EXPORT_SYMBOL_GPL(pnfs_readhdr_free);
1757
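/*
 * Generic ->pg_doio for pNFS reads: mirrors pnfs_generic_pg_writepages(),
 * using nfs_generic_pagein() to build the RPC list and resending through
 * the MDS any request the layout driver does not attempt.
 */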
1758 int
1759 pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
1760 {
1761         struct nfs_read_header *rhdr;
1762         struct nfs_pgio_header *hdr;
1763         int ret;
1764
1765         rhdr = nfs_readhdr_alloc();
1766         if (!rhdr) {
1767                 desc->pg_completion_ops->error_cleanup(&desc->pg_list);
1768                 ret = -ENOMEM;
1769                 pnfs_put_lseg(desc->pg_lseg);
1770                 desc->pg_lseg = NULL;
1771                 return ret;
1772         }
1773         hdr = &rhdr->header;
1774         nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
1775         hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
1776         atomic_inc(&hdr->refcnt);
1777         ret = nfs_generic_pagein(desc, hdr);
1778         if (ret != 0) {
1779                 pnfs_put_lseg(desc->pg_lseg);
1780                 desc->pg_lseg = NULL;
1781         } else
1782                 pnfs_do_multiple_reads(desc, &hdr->rpc_list);
1783         if (atomic_dec_and_test(&hdr->refcnt))
1784                 hdr->completion_ops->completion(hdr);
1785         return ret;
1786 }
1787 EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
1788
1789 /*
1790  * There can be multiple RW segments.
1791  */
1792 static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
1793 {
1794         struct pnfs_layout_segment *lseg;
1795
1796         list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
1797                 if (lseg->pls_range.iomode == IOMODE_RW &&
1798                     test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
1799                         list_add(&lseg->pls_lc_list, listp);
1800         }
1801 }
1802
1803 static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
1804 {
1805         struct pnfs_layout_segment *lseg, *tmp;
1806         unsigned long *bitlock = &NFS_I(inode)->flags;
1807
1808         /* Matched by references in pnfs_set_layoutcommit */
1809         list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
1810                 list_del_init(&lseg->pls_lc_list);
1811                 pnfs_put_lseg(lseg);
1812         }
1813
1814         clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
1815         smp_mb__after_clear_bit();
1816         wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
1817 }
1818
1819 void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
1820 {
1821         pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
1822 }
1823 EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
1824
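/*
 * Record that freshly written data needs a LAYOUTCOMMIT: track the last
 * write byte in plh_lwb, take a reference on the layout segment (matched
 * in nfs4_layoutcommit_release) and mark the inode dirty so writeback can
 * later issue the commit.
 */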
1825 void
1826 pnfs_set_layoutcommit(struct nfs_write_data *wdata)
1827 {
1828         struct nfs_pgio_header *hdr = wdata->header;
1829         struct inode *inode = hdr->inode;
1830         struct nfs_inode *nfsi = NFS_I(inode);
1831         loff_t end_pos = wdata->mds_offset + wdata->res.count;
1832         bool mark_as_dirty = false;
1833
1834         spin_lock(&inode->i_lock);
1835         if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
1836                 mark_as_dirty = true;
1837                 dprintk("%s: Set layoutcommit for inode %lu ",
1838                         __func__, inode->i_ino);
1839         }
1840         if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
1841                 /* references matched in nfs4_layoutcommit_release */
1842                 pnfs_get_lseg(hdr->lseg);
1843         }
1844         if (end_pos > nfsi->layout->plh_lwb)
1845                 nfsi->layout->plh_lwb = end_pos;
1846         spin_unlock(&inode->i_lock);
1847         dprintk("%s: lseg %p end_pos %llu\n",
1848                 __func__, hdr->lseg, nfsi->layout->plh_lwb);
1849
1850         /* if pnfs_layoutcommit_inode() runs between inode locks, the next one
1851          * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
1852         if (mark_as_dirty)
1853                 mark_inode_dirty_sync(inode);
1854 }
1855 EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
1856
1857 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
1858 {
1859         struct nfs_server *nfss = NFS_SERVER(data->args.inode);
1860
1861         if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
1862                 nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
1863         pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
1864 }
1865
1866 /*
1867  * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
1868  * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
1869  * data to disk to allow the server to recover the data if it crashes.
1870  * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
1871  * is off, and a COMMIT is sent to a data server, or
1872  * if WRITEs to a data server return NFS_DATA_SYNC.
1873  */
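/*
 * Sketch of the expected calling pattern (an assumption about callers, not
 * verified here): pnfs_set_layoutcommit() marks the inode dirty after a
 * successful pNFS write, and the ->write_inode() path later calls
 * pnfs_layoutcommit_inode(inode, sync), where 'sync' selects whether to
 * wait for a LAYOUTCOMMIT already in flight instead of returning -EAGAIN.
 */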
1874 int
1875 pnfs_layoutcommit_inode(struct inode *inode, bool sync)
1876 {
1877         struct nfs4_layoutcommit_data *data;
1878         struct nfs_inode *nfsi = NFS_I(inode);
1879         loff_t end_pos;
1880         int status = 0;
1881
1882         dprintk("--> %s inode %lu\n", __func__, inode->i_ino);
1883
1884         if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
1885                 return 0;
1886
1887         /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
1888         data = kzalloc(sizeof(*data), GFP_NOFS);
1889         if (!data) {
1890                 status = -ENOMEM;
1891                 goto out;
1892         }
1893
1894         if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
1895                 goto out_free;
1896
1897         if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
1898                 if (!sync) {
1899                         status = -EAGAIN;
1900                         goto out_free;
1901                 }
1902                 status = wait_on_bit_lock(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING,
1903                                         nfs_wait_bit_killable, TASK_KILLABLE);
1904                 if (status)
1905                         goto out_free;
1906         }
1907
1908         INIT_LIST_HEAD(&data->lseg_list);
1909         spin_lock(&inode->i_lock);
1910         if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
1911                 clear_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags);
1912                 spin_unlock(&inode->i_lock);
1913                 wake_up_bit(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING);
1914                 goto out_free;
1915         }
1916
1917         pnfs_list_write_lseg(inode, &data->lseg_list);
1918
1919         end_pos = nfsi->layout->plh_lwb;
1920         nfsi->layout->plh_lwb = 0;
1921
1922         nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
1923         spin_unlock(&inode->i_lock);
1924
1925         data->args.inode = inode;
1926         data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
1927         nfs_fattr_init(&data->fattr);
1928         data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
1929         data->res.fattr = &data->fattr;
1930         data->args.lastbytewritten = end_pos - 1;
1931         data->res.server = NFS_SERVER(inode);
1932
1933         status = nfs4_proc_layoutcommit(data, sync);
1934 out:
1935         if (status)
1936                 mark_inode_dirty_sync(inode);
1937         dprintk("<-- %s status %d\n", __func__, status);
1938         return status;
1939 out_free:
1940         kfree(data);
1941         goto out;
1942 }
1943
1944 struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
1945 {
1946         struct nfs4_threshold *thp;
1947
1948         thp = kzalloc(sizeof(*thp), GFP_NOFS);
1949         if (!thp) {
1950                 dprintk("%s mdsthreshold allocation failed\n", __func__);
1951                 return NULL;
1952         }
1953         return thp;
1954 }