]> rtime.felk.cvut.cz Git - linux-imx.git/blob - net/sunrpc/clnt.c
SUNRPC: Allow rpc_create() to request that TCP slots be unlimited
[linux-imx.git] / net / sunrpc / clnt.c
1 /*
2  *  linux/net/sunrpc/clnt.c
3  *
4  *  This file contains the high-level RPC interface.
5  *  It is modeled as a finite state machine to support both synchronous
6  *  and asynchronous requests.
7  *
8  *  -   RPC header generation and argument serialization.
9  *  -   Credential refresh.
10  *  -   TCP connect handling.
11  *  -   Retry of operation when it is suspected the operation failed because
12  *      of uid squashing on the server, or when the credentials were stale
13  *      and need to be refreshed, or when a packet was damaged in transit.
14  *      This may be have to be moved to the VFS layer.
15  *
16  *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
17  *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
18  */
19
20
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/kallsyms.h>
24 #include <linux/mm.h>
25 #include <linux/namei.h>
26 #include <linux/mount.h>
27 #include <linux/slab.h>
28 #include <linux/utsname.h>
29 #include <linux/workqueue.h>
30 #include <linux/in.h>
31 #include <linux/in6.h>
32 #include <linux/un.h>
33 #include <linux/rcupdate.h>
34
35 #include <linux/sunrpc/clnt.h>
36 #include <linux/sunrpc/addr.h>
37 #include <linux/sunrpc/rpc_pipe_fs.h>
38 #include <linux/sunrpc/metrics.h>
39 #include <linux/sunrpc/bc_xprt.h>
40 #include <trace/events/sunrpc.h>
41
42 #include "sunrpc.h"
43 #include "netns.h"
44
45 #ifdef RPC_DEBUG
46 # define RPCDBG_FACILITY        RPCDBG_CALL
47 #endif
48
49 #define dprint_status(t)                                        \
50         dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,         \
51                         __func__, t->tk_status)
52
53 /*
54  * All RPC clients are linked into this list
55  */
56
57 static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
58
59
60 static void     call_start(struct rpc_task *task);
61 static void     call_reserve(struct rpc_task *task);
62 static void     call_reserveresult(struct rpc_task *task);
63 static void     call_allocate(struct rpc_task *task);
64 static void     call_decode(struct rpc_task *task);
65 static void     call_bind(struct rpc_task *task);
66 static void     call_bind_status(struct rpc_task *task);
67 static void     call_transmit(struct rpc_task *task);
68 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
69 static void     call_bc_transmit(struct rpc_task *task);
70 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
71 static void     call_status(struct rpc_task *task);
72 static void     call_transmit_status(struct rpc_task *task);
73 static void     call_refresh(struct rpc_task *task);
74 static void     call_refreshresult(struct rpc_task *task);
75 static void     call_timeout(struct rpc_task *task);
76 static void     call_connect(struct rpc_task *task);
77 static void     call_connect_status(struct rpc_task *task);
78
79 static __be32   *rpc_encode_header(struct rpc_task *task);
80 static __be32   *rpc_verify_header(struct rpc_task *task);
81 static int      rpc_ping(struct rpc_clnt *clnt);
82
83 static void rpc_register_client(struct rpc_clnt *clnt)
84 {
85         struct net *net = rpc_net_ns(clnt);
86         struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
87
88         spin_lock(&sn->rpc_client_lock);
89         list_add(&clnt->cl_clients, &sn->all_clients);
90         spin_unlock(&sn->rpc_client_lock);
91 }
92
93 static void rpc_unregister_client(struct rpc_clnt *clnt)
94 {
95         struct net *net = rpc_net_ns(clnt);
96         struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
97
98         spin_lock(&sn->rpc_client_lock);
99         list_del(&clnt->cl_clients);
100         spin_unlock(&sn->rpc_client_lock);
101 }
102
103 static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
104 {
105         if (clnt->cl_dentry) {
106                 if (clnt->cl_auth && clnt->cl_auth->au_ops->pipes_destroy)
107                         clnt->cl_auth->au_ops->pipes_destroy(clnt->cl_auth);
108                 rpc_remove_client_dir(clnt->cl_dentry);
109         }
110         clnt->cl_dentry = NULL;
111 }
112
113 static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
114 {
115         struct net *net = rpc_net_ns(clnt);
116         struct super_block *pipefs_sb;
117
118         pipefs_sb = rpc_get_sb_net(net);
119         if (pipefs_sb) {
120                 __rpc_clnt_remove_pipedir(clnt);
121                 rpc_put_sb_net(net);
122         }
123 }
124
125 static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
126                                     struct rpc_clnt *clnt,
127                                     const char *dir_name)
128 {
129         static uint32_t clntid;
130         char name[15];
131         struct qstr q = { .name = name };
132         struct dentry *dir, *dentry;
133         int error;
134
135         dir = rpc_d_lookup_sb(sb, dir_name);
136         if (dir == NULL) {
137                 pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
138                 return dir;
139         }
140         for (;;) {
141                 q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
142                 name[sizeof(name) - 1] = '\0';
143                 q.hash = full_name_hash(q.name, q.len);
144                 dentry = rpc_create_client_dir(dir, &q, clnt);
145                 if (!IS_ERR(dentry))
146                         break;
147                 error = PTR_ERR(dentry);
148                 if (error != -EEXIST) {
149                         printk(KERN_INFO "RPC: Couldn't create pipefs entry"
150                                         " %s/%s, error %d\n",
151                                         dir_name, name, error);
152                         break;
153                 }
154         }
155         dput(dir);
156         return dentry;
157 }
158
159 static int
160 rpc_setup_pipedir(struct rpc_clnt *clnt, const char *dir_name)
161 {
162         struct net *net = rpc_net_ns(clnt);
163         struct super_block *pipefs_sb;
164         struct dentry *dentry;
165
166         clnt->cl_dentry = NULL;
167         if (dir_name == NULL)
168                 return 0;
169         pipefs_sb = rpc_get_sb_net(net);
170         if (!pipefs_sb)
171                 return 0;
172         dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt, dir_name);
173         rpc_put_sb_net(net);
174         if (IS_ERR(dentry))
175                 return PTR_ERR(dentry);
176         clnt->cl_dentry = dentry;
177         return 0;
178 }
179
180 static inline int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
181 {
182         if (((event == RPC_PIPEFS_MOUNT) && clnt->cl_dentry) ||
183             ((event == RPC_PIPEFS_UMOUNT) && !clnt->cl_dentry))
184                 return 1;
185         return 0;
186 }
187
188 static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
189                                    struct super_block *sb)
190 {
191         struct dentry *dentry;
192         int err = 0;
193
194         switch (event) {
195         case RPC_PIPEFS_MOUNT:
196                 dentry = rpc_setup_pipedir_sb(sb, clnt,
197                                               clnt->cl_program->pipe_dir_name);
198                 if (!dentry)
199                         return -ENOENT;
200                 if (IS_ERR(dentry))
201                         return PTR_ERR(dentry);
202                 clnt->cl_dentry = dentry;
203                 if (clnt->cl_auth->au_ops->pipes_create) {
204                         err = clnt->cl_auth->au_ops->pipes_create(clnt->cl_auth);
205                         if (err)
206                                 __rpc_clnt_remove_pipedir(clnt);
207                 }
208                 break;
209         case RPC_PIPEFS_UMOUNT:
210                 __rpc_clnt_remove_pipedir(clnt);
211                 break;
212         default:
213                 printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
214                 return -ENOTSUPP;
215         }
216         return err;
217 }
218
219 static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
220                                 struct super_block *sb)
221 {
222         int error = 0;
223
224         for (;; clnt = clnt->cl_parent) {
225                 if (!rpc_clnt_skip_event(clnt, event))
226                         error = __rpc_clnt_handle_event(clnt, event, sb);
227                 if (error || clnt == clnt->cl_parent)
228                         break;
229         }
230         return error;
231 }
232
233 static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
234 {
235         struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
236         struct rpc_clnt *clnt;
237
238         spin_lock(&sn->rpc_client_lock);
239         list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
240                 if (clnt->cl_program->pipe_dir_name == NULL)
241                         continue;
242                 if (rpc_clnt_skip_event(clnt, event))
243                         continue;
244                 if (atomic_inc_not_zero(&clnt->cl_count) == 0)
245                         continue;
246                 spin_unlock(&sn->rpc_client_lock);
247                 return clnt;
248         }
249         spin_unlock(&sn->rpc_client_lock);
250         return NULL;
251 }
252
253 static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
254                             void *ptr)
255 {
256         struct super_block *sb = ptr;
257         struct rpc_clnt *clnt;
258         int error = 0;
259
260         while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) {
261                 error = __rpc_pipefs_event(clnt, event, sb);
262                 rpc_release_client(clnt);
263                 if (error)
264                         break;
265         }
266         return error;
267 }
268
269 static struct notifier_block rpc_clients_block = {
270         .notifier_call  = rpc_pipefs_event,
271         .priority       = SUNRPC_PIPEFS_RPC_PRIO,
272 };
273
274 int rpc_clients_notifier_register(void)
275 {
276         return rpc_pipefs_notifier_register(&rpc_clients_block);
277 }
278
279 void rpc_clients_notifier_unregister(void)
280 {
281         return rpc_pipefs_notifier_unregister(&rpc_clients_block);
282 }
283
284 static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
285 {
286         clnt->cl_nodelen = strlen(nodename);
287         if (clnt->cl_nodelen > UNX_MAXNODENAME)
288                 clnt->cl_nodelen = UNX_MAXNODENAME;
289         memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
290 }
291
292 static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
293 {
294         const struct rpc_program *program = args->program;
295         const struct rpc_version *version;
296         struct rpc_clnt         *clnt = NULL;
297         struct rpc_auth         *auth;
298         int err;
299
300         /* sanity check the name before trying to print it */
301         dprintk("RPC:       creating %s client for %s (xprt %p)\n",
302                         program->name, args->servername, xprt);
303
304         err = rpciod_up();
305         if (err)
306                 goto out_no_rpciod;
307         err = -EINVAL;
308         if (!xprt)
309                 goto out_no_xprt;
310
311         if (args->version >= program->nrvers)
312                 goto out_err;
313         version = program->version[args->version];
314         if (version == NULL)
315                 goto out_err;
316
317         err = -ENOMEM;
318         clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
319         if (!clnt)
320                 goto out_err;
321         clnt->cl_parent = clnt;
322
323         rcu_assign_pointer(clnt->cl_xprt, xprt);
324         clnt->cl_procinfo = version->procs;
325         clnt->cl_maxproc  = version->nrprocs;
326         clnt->cl_protname = program->name;
327         clnt->cl_prog     = args->prognumber ? : program->number;
328         clnt->cl_vers     = version->number;
329         clnt->cl_stats    = program->stats;
330         clnt->cl_metrics  = rpc_alloc_iostats(clnt);
331         err = -ENOMEM;
332         if (clnt->cl_metrics == NULL)
333                 goto out_no_stats;
334         clnt->cl_program  = program;
335         INIT_LIST_HEAD(&clnt->cl_tasks);
336         spin_lock_init(&clnt->cl_lock);
337
338         if (!xprt_bound(xprt))
339                 clnt->cl_autobind = 1;
340
341         clnt->cl_timeout = xprt->timeout;
342         if (args->timeout != NULL) {
343                 memcpy(&clnt->cl_timeout_default, args->timeout,
344                                 sizeof(clnt->cl_timeout_default));
345                 clnt->cl_timeout = &clnt->cl_timeout_default;
346         }
347
348         clnt->cl_rtt = &clnt->cl_rtt_default;
349         rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
350         clnt->cl_principal = NULL;
351         if (args->client_name) {
352                 clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL);
353                 if (!clnt->cl_principal)
354                         goto out_no_principal;
355         }
356
357         atomic_set(&clnt->cl_count, 1);
358
359         err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
360         if (err < 0)
361                 goto out_no_path;
362
363         auth = rpcauth_create(args->authflavor, clnt);
364         if (IS_ERR(auth)) {
365                 printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
366                                 args->authflavor);
367                 err = PTR_ERR(auth);
368                 goto out_no_auth;
369         }
370
371         /* save the nodename */
372         rpc_clnt_set_nodename(clnt, utsname()->nodename);
373         rpc_register_client(clnt);
374         return clnt;
375
376 out_no_auth:
377         rpc_clnt_remove_pipedir(clnt);
378 out_no_path:
379         kfree(clnt->cl_principal);
380 out_no_principal:
381         rpc_free_iostats(clnt->cl_metrics);
382 out_no_stats:
383         kfree(clnt);
384 out_err:
385         xprt_put(xprt);
386 out_no_xprt:
387         rpciod_down();
388 out_no_rpciod:
389         return ERR_PTR(err);
390 }
391
392 /**
393  * rpc_create - create an RPC client and transport with one call
394  * @args: rpc_clnt create argument structure
395  *
396  * Creates and initializes an RPC transport and an RPC client.
397  *
398  * It can ping the server in order to determine if it is up, and to see if
399  * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
400  * this behavior so asynchronous tasks can also use rpc_create.
401  */
402 struct rpc_clnt *rpc_create(struct rpc_create_args *args)
403 {
404         struct rpc_xprt *xprt;
405         struct rpc_clnt *clnt;
406         struct xprt_create xprtargs = {
407                 .net = args->net,
408                 .ident = args->protocol,
409                 .srcaddr = args->saddress,
410                 .dstaddr = args->address,
411                 .addrlen = args->addrsize,
412                 .servername = args->servername,
413                 .bc_xprt = args->bc_xprt,
414         };
415         char servername[48];
416
417         if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
418                 xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
419         /*
420          * If the caller chooses not to specify a hostname, whip
421          * up a string representation of the passed-in address.
422          */
423         if (xprtargs.servername == NULL) {
424                 struct sockaddr_un *sun =
425                                 (struct sockaddr_un *)args->address;
426                 struct sockaddr_in *sin =
427                                 (struct sockaddr_in *)args->address;
428                 struct sockaddr_in6 *sin6 =
429                                 (struct sockaddr_in6 *)args->address;
430
431                 servername[0] = '\0';
432                 switch (args->address->sa_family) {
433                 case AF_LOCAL:
434                         snprintf(servername, sizeof(servername), "%s",
435                                  sun->sun_path);
436                         break;
437                 case AF_INET:
438                         snprintf(servername, sizeof(servername), "%pI4",
439                                  &sin->sin_addr.s_addr);
440                         break;
441                 case AF_INET6:
442                         snprintf(servername, sizeof(servername), "%pI6",
443                                  &sin6->sin6_addr);
444                         break;
445                 default:
446                         /* caller wants default server name, but
447                          * address family isn't recognized. */
448                         return ERR_PTR(-EINVAL);
449                 }
450                 xprtargs.servername = servername;
451         }
452
453         xprt = xprt_create_transport(&xprtargs);
454         if (IS_ERR(xprt))
455                 return (struct rpc_clnt *)xprt;
456
457         /*
458          * By default, kernel RPC client connects from a reserved port.
459          * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
460          * but it is always enabled for rpciod, which handles the connect
461          * operation.
462          */
463         xprt->resvport = 1;
464         if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
465                 xprt->resvport = 0;
466
467         clnt = rpc_new_client(args, xprt);
468         if (IS_ERR(clnt))
469                 return clnt;
470
471         if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
472                 int err = rpc_ping(clnt);
473                 if (err != 0) {
474                         rpc_shutdown_client(clnt);
475                         return ERR_PTR(err);
476                 }
477         }
478
479         clnt->cl_softrtry = 1;
480         if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
481                 clnt->cl_softrtry = 0;
482
483         if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
484                 clnt->cl_autobind = 1;
485         if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
486                 clnt->cl_discrtry = 1;
487         if (!(args->flags & RPC_CLNT_CREATE_QUIET))
488                 clnt->cl_chatty = 1;
489
490         return clnt;
491 }
492 EXPORT_SYMBOL_GPL(rpc_create);
493
494 /*
495  * This function clones the RPC client structure. It allows us to share the
496  * same transport while varying parameters such as the authentication
497  * flavour.
498  */
499 static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
500                                            struct rpc_clnt *clnt)
501 {
502         struct rpc_xprt *xprt;
503         struct rpc_clnt *new;
504         int err;
505
506         err = -ENOMEM;
507         rcu_read_lock();
508         xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
509         rcu_read_unlock();
510         if (xprt == NULL)
511                 goto out_err;
512         args->servername = xprt->servername;
513
514         new = rpc_new_client(args, xprt);
515         if (IS_ERR(new)) {
516                 err = PTR_ERR(new);
517                 goto out_put;
518         }
519
520         atomic_inc(&clnt->cl_count);
521         new->cl_parent = clnt;
522
523         /* Turn off autobind on clones */
524         new->cl_autobind = 0;
525         new->cl_softrtry = clnt->cl_softrtry;
526         new->cl_discrtry = clnt->cl_discrtry;
527         new->cl_chatty = clnt->cl_chatty;
528         return new;
529
530 out_put:
531         xprt_put(xprt);
532 out_err:
533         dprintk("RPC:       %s: returned error %d\n", __func__, err);
534         return ERR_PTR(err);
535 }
536
537 /**
538  * rpc_clone_client - Clone an RPC client structure
539  *
540  * @clnt: RPC client whose parameters are copied
541  *
542  * Returns a fresh RPC client or an ERR_PTR.
543  */
544 struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt)
545 {
546         struct rpc_create_args args = {
547                 .program        = clnt->cl_program,
548                 .prognumber     = clnt->cl_prog,
549                 .version        = clnt->cl_vers,
550                 .authflavor     = clnt->cl_auth->au_flavor,
551                 .client_name    = clnt->cl_principal,
552         };
553         return __rpc_clone_client(&args, clnt);
554 }
555 EXPORT_SYMBOL_GPL(rpc_clone_client);
556
557 /**
558  * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
559  *
560  * @clnt: RPC client whose parameters are copied
561  * @flavor: security flavor for new client
562  *
563  * Returns a fresh RPC client or an ERR_PTR.
564  */
565 struct rpc_clnt *
566 rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
567 {
568         struct rpc_create_args args = {
569                 .program        = clnt->cl_program,
570                 .prognumber     = clnt->cl_prog,
571                 .version        = clnt->cl_vers,
572                 .authflavor     = flavor,
573                 .client_name    = clnt->cl_principal,
574         };
575         return __rpc_clone_client(&args, clnt);
576 }
577 EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth);
578
579 /*
580  * Kill all tasks for the given client.
581  * XXX: kill their descendants as well?
582  */
583 void rpc_killall_tasks(struct rpc_clnt *clnt)
584 {
585         struct rpc_task *rovr;
586
587
588         if (list_empty(&clnt->cl_tasks))
589                 return;
590         dprintk("RPC:       killing all tasks for client %p\n", clnt);
591         /*
592          * Spin lock all_tasks to prevent changes...
593          */
594         spin_lock(&clnt->cl_lock);
595         list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
596                 if (!RPC_IS_ACTIVATED(rovr))
597                         continue;
598                 if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
599                         rovr->tk_flags |= RPC_TASK_KILLED;
600                         rpc_exit(rovr, -EIO);
601                         if (RPC_IS_QUEUED(rovr))
602                                 rpc_wake_up_queued_task(rovr->tk_waitqueue,
603                                                         rovr);
604                 }
605         }
606         spin_unlock(&clnt->cl_lock);
607 }
608 EXPORT_SYMBOL_GPL(rpc_killall_tasks);
609
610 /*
611  * Properly shut down an RPC client, terminating all outstanding
612  * requests.
613  */
614 void rpc_shutdown_client(struct rpc_clnt *clnt)
615 {
616         might_sleep();
617
618         dprintk_rcu("RPC:       shutting down %s client for %s\n",
619                         clnt->cl_protname,
620                         rcu_dereference(clnt->cl_xprt)->servername);
621
622         while (!list_empty(&clnt->cl_tasks)) {
623                 rpc_killall_tasks(clnt);
624                 wait_event_timeout(destroy_wait,
625                         list_empty(&clnt->cl_tasks), 1*HZ);
626         }
627
628         rpc_release_client(clnt);
629 }
630 EXPORT_SYMBOL_GPL(rpc_shutdown_client);
631
632 /*
633  * Free an RPC client
634  */
635 static void
636 rpc_free_client(struct rpc_clnt *clnt)
637 {
638         dprintk_rcu("RPC:       destroying %s client for %s\n",
639                         clnt->cl_protname,
640                         rcu_dereference(clnt->cl_xprt)->servername);
641         if (clnt->cl_parent != clnt)
642                 rpc_release_client(clnt->cl_parent);
643         rpc_unregister_client(clnt);
644         rpc_clnt_remove_pipedir(clnt);
645         rpc_free_iostats(clnt->cl_metrics);
646         kfree(clnt->cl_principal);
647         clnt->cl_metrics = NULL;
648         xprt_put(rcu_dereference_raw(clnt->cl_xprt));
649         rpciod_down();
650         kfree(clnt);
651 }
652
653 /*
654  * Free an RPC client
655  */
656 static void
657 rpc_free_auth(struct rpc_clnt *clnt)
658 {
659         if (clnt->cl_auth == NULL) {
660                 rpc_free_client(clnt);
661                 return;
662         }
663
664         /*
665          * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
666          *       release remaining GSS contexts. This mechanism ensures
667          *       that it can do so safely.
668          */
669         atomic_inc(&clnt->cl_count);
670         rpcauth_release(clnt->cl_auth);
671         clnt->cl_auth = NULL;
672         if (atomic_dec_and_test(&clnt->cl_count))
673                 rpc_free_client(clnt);
674 }
675
676 /*
677  * Release reference to the RPC client
678  */
679 void
680 rpc_release_client(struct rpc_clnt *clnt)
681 {
682         dprintk("RPC:       rpc_release_client(%p)\n", clnt);
683
684         if (list_empty(&clnt->cl_tasks))
685                 wake_up(&destroy_wait);
686         if (atomic_dec_and_test(&clnt->cl_count))
687                 rpc_free_auth(clnt);
688 }
689
690 /**
691  * rpc_bind_new_program - bind a new RPC program to an existing client
692  * @old: old rpc_client
693  * @program: rpc program to set
694  * @vers: rpc program version
695  *
696  * Clones the rpc client and sets up a new RPC program. This is mainly
697  * of use for enabling different RPC programs to share the same transport.
698  * The Sun NFSv2/v3 ACL protocol can do this.
699  */
700 struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
701                                       const struct rpc_program *program,
702                                       u32 vers)
703 {
704         struct rpc_create_args args = {
705                 .program        = program,
706                 .prognumber     = program->number,
707                 .version        = vers,
708                 .authflavor     = old->cl_auth->au_flavor,
709                 .client_name    = old->cl_principal,
710         };
711         struct rpc_clnt *clnt;
712         int err;
713
714         clnt = __rpc_clone_client(&args, old);
715         if (IS_ERR(clnt))
716                 goto out;
717         err = rpc_ping(clnt);
718         if (err != 0) {
719                 rpc_shutdown_client(clnt);
720                 clnt = ERR_PTR(err);
721         }
722 out:
723         return clnt;
724 }
725 EXPORT_SYMBOL_GPL(rpc_bind_new_program);
726
727 void rpc_task_release_client(struct rpc_task *task)
728 {
729         struct rpc_clnt *clnt = task->tk_client;
730
731         if (clnt != NULL) {
732                 /* Remove from client task list */
733                 spin_lock(&clnt->cl_lock);
734                 list_del(&task->tk_task);
735                 spin_unlock(&clnt->cl_lock);
736                 task->tk_client = NULL;
737
738                 rpc_release_client(clnt);
739         }
740 }
741
742 static
743 void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
744 {
745         if (clnt != NULL) {
746                 rpc_task_release_client(task);
747                 task->tk_client = clnt;
748                 atomic_inc(&clnt->cl_count);
749                 if (clnt->cl_softrtry)
750                         task->tk_flags |= RPC_TASK_SOFT;
751                 if (sk_memalloc_socks()) {
752                         struct rpc_xprt *xprt;
753
754                         rcu_read_lock();
755                         xprt = rcu_dereference(clnt->cl_xprt);
756                         if (xprt->swapper)
757                                 task->tk_flags |= RPC_TASK_SWAPPER;
758                         rcu_read_unlock();
759                 }
760                 /* Add to the client's list of all tasks */
761                 spin_lock(&clnt->cl_lock);
762                 list_add_tail(&task->tk_task, &clnt->cl_tasks);
763                 spin_unlock(&clnt->cl_lock);
764         }
765 }
766
767 void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt)
768 {
769         rpc_task_release_client(task);
770         rpc_task_set_client(task, clnt);
771 }
772 EXPORT_SYMBOL_GPL(rpc_task_reset_client);
773
774
775 static void
776 rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
777 {
778         if (msg != NULL) {
779                 task->tk_msg.rpc_proc = msg->rpc_proc;
780                 task->tk_msg.rpc_argp = msg->rpc_argp;
781                 task->tk_msg.rpc_resp = msg->rpc_resp;
782                 if (msg->rpc_cred != NULL)
783                         task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred);
784         }
785 }
786
787 /*
788  * Default callback for async RPC calls
789  */
790 static void
791 rpc_default_callback(struct rpc_task *task, void *data)
792 {
793 }
794
795 static const struct rpc_call_ops rpc_default_ops = {
796         .rpc_call_done = rpc_default_callback,
797 };
798
799 /**
800  * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
801  * @task_setup_data: pointer to task initialisation data
802  */
803 struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
804 {
805         struct rpc_task *task;
806
807         task = rpc_new_task(task_setup_data);
808         if (IS_ERR(task))
809                 goto out;
810
811         rpc_task_set_client(task, task_setup_data->rpc_client);
812         rpc_task_set_rpc_message(task, task_setup_data->rpc_message);
813
814         if (task->tk_action == NULL)
815                 rpc_call_start(task);
816
817         atomic_inc(&task->tk_count);
818         rpc_execute(task);
819 out:
820         return task;
821 }
822 EXPORT_SYMBOL_GPL(rpc_run_task);
823
824 /**
825  * rpc_call_sync - Perform a synchronous RPC call
826  * @clnt: pointer to RPC client
827  * @msg: RPC call parameters
828  * @flags: RPC call flags
829  */
830 int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
831 {
832         struct rpc_task *task;
833         struct rpc_task_setup task_setup_data = {
834                 .rpc_client = clnt,
835                 .rpc_message = msg,
836                 .callback_ops = &rpc_default_ops,
837                 .flags = flags,
838         };
839         int status;
840
841         WARN_ON_ONCE(flags & RPC_TASK_ASYNC);
842         if (flags & RPC_TASK_ASYNC) {
843                 rpc_release_calldata(task_setup_data.callback_ops,
844                         task_setup_data.callback_data);
845                 return -EINVAL;
846         }
847
848         task = rpc_run_task(&task_setup_data);
849         if (IS_ERR(task))
850                 return PTR_ERR(task);
851         status = task->tk_status;
852         rpc_put_task(task);
853         return status;
854 }
855 EXPORT_SYMBOL_GPL(rpc_call_sync);
856
857 /**
858  * rpc_call_async - Perform an asynchronous RPC call
859  * @clnt: pointer to RPC client
860  * @msg: RPC call parameters
861  * @flags: RPC call flags
862  * @tk_ops: RPC call ops
863  * @data: user call data
864  */
865 int
866 rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
867                const struct rpc_call_ops *tk_ops, void *data)
868 {
869         struct rpc_task *task;
870         struct rpc_task_setup task_setup_data = {
871                 .rpc_client = clnt,
872                 .rpc_message = msg,
873                 .callback_ops = tk_ops,
874                 .callback_data = data,
875                 .flags = flags|RPC_TASK_ASYNC,
876         };
877
878         task = rpc_run_task(&task_setup_data);
879         if (IS_ERR(task))
880                 return PTR_ERR(task);
881         rpc_put_task(task);
882         return 0;
883 }
884 EXPORT_SYMBOL_GPL(rpc_call_async);
885
886 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
887 /**
888  * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
889  * rpc_execute against it
890  * @req: RPC request
891  * @tk_ops: RPC call ops
892  */
893 struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
894                                 const struct rpc_call_ops *tk_ops)
895 {
896         struct rpc_task *task;
897         struct xdr_buf *xbufp = &req->rq_snd_buf;
898         struct rpc_task_setup task_setup_data = {
899                 .callback_ops = tk_ops,
900         };
901
902         dprintk("RPC: rpc_run_bc_task req= %p\n", req);
903         /*
904          * Create an rpc_task to send the data
905          */
906         task = rpc_new_task(&task_setup_data);
907         if (IS_ERR(task)) {
908                 xprt_free_bc_request(req);
909                 goto out;
910         }
911         task->tk_rqstp = req;
912
913         /*
914          * Set up the xdr_buf length.
915          * This also indicates that the buffer is XDR encoded already.
916          */
917         xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
918                         xbufp->tail[0].iov_len;
919
920         task->tk_action = call_bc_transmit;
921         atomic_inc(&task->tk_count);
922         WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
923         rpc_execute(task);
924
925 out:
926         dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
927         return task;
928 }
929 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
930
931 void
932 rpc_call_start(struct rpc_task *task)
933 {
934         task->tk_action = call_start;
935 }
936 EXPORT_SYMBOL_GPL(rpc_call_start);
937
938 /**
939  * rpc_peeraddr - extract remote peer address from clnt's xprt
940  * @clnt: RPC client structure
941  * @buf: target buffer
942  * @bufsize: length of target buffer
943  *
944  * Returns the number of bytes that are actually in the stored address.
945  */
946 size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
947 {
948         size_t bytes;
949         struct rpc_xprt *xprt;
950
951         rcu_read_lock();
952         xprt = rcu_dereference(clnt->cl_xprt);
953
954         bytes = xprt->addrlen;
955         if (bytes > bufsize)
956                 bytes = bufsize;
957         memcpy(buf, &xprt->addr, bytes);
958         rcu_read_unlock();
959
960         return bytes;
961 }
962 EXPORT_SYMBOL_GPL(rpc_peeraddr);
963
964 /**
965  * rpc_peeraddr2str - return remote peer address in printable format
966  * @clnt: RPC client structure
967  * @format: address format
968  *
969  * NB: the lifetime of the memory referenced by the returned pointer is
970  * the same as the rpc_xprt itself.  As long as the caller uses this
971  * pointer, it must hold the RCU read lock.
972  */
973 const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
974                              enum rpc_display_format_t format)
975 {
976         struct rpc_xprt *xprt;
977
978         xprt = rcu_dereference(clnt->cl_xprt);
979
980         if (xprt->address_strings[format] != NULL)
981                 return xprt->address_strings[format];
982         else
983                 return "unprintable";
984 }
985 EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
986
987 static const struct sockaddr_in rpc_inaddr_loopback = {
988         .sin_family             = AF_INET,
989         .sin_addr.s_addr        = htonl(INADDR_ANY),
990 };
991
992 static const struct sockaddr_in6 rpc_in6addr_loopback = {
993         .sin6_family            = AF_INET6,
994         .sin6_addr              = IN6ADDR_ANY_INIT,
995 };
996
997 /*
998  * Try a getsockname() on a connected datagram socket.  Using a
999  * connected datagram socket prevents leaving a socket in TIME_WAIT.
1000  * This conserves the ephemeral port number space.
1001  *
1002  * Returns zero and fills in "buf" if successful; otherwise, a
1003  * negative errno is returned.
1004  */
1005 static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
1006                         struct sockaddr *buf, int buflen)
1007 {
1008         struct socket *sock;
1009         int err;
1010
1011         err = __sock_create(net, sap->sa_family,
1012                                 SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
1013         if (err < 0) {
1014                 dprintk("RPC:       can't create UDP socket (%d)\n", err);
1015                 goto out;
1016         }
1017
1018         switch (sap->sa_family) {
1019         case AF_INET:
1020                 err = kernel_bind(sock,
1021                                 (struct sockaddr *)&rpc_inaddr_loopback,
1022                                 sizeof(rpc_inaddr_loopback));
1023                 break;
1024         case AF_INET6:
1025                 err = kernel_bind(sock,
1026                                 (struct sockaddr *)&rpc_in6addr_loopback,
1027                                 sizeof(rpc_in6addr_loopback));
1028                 break;
1029         default:
1030                 err = -EAFNOSUPPORT;
1031                 goto out;
1032         }
1033         if (err < 0) {
1034                 dprintk("RPC:       can't bind UDP socket (%d)\n", err);
1035                 goto out_release;
1036         }
1037
1038         err = kernel_connect(sock, sap, salen, 0);
1039         if (err < 0) {
1040                 dprintk("RPC:       can't connect UDP socket (%d)\n", err);
1041                 goto out_release;
1042         }
1043
1044         err = kernel_getsockname(sock, buf, &buflen);
1045         if (err < 0) {
1046                 dprintk("RPC:       getsockname failed (%d)\n", err);
1047                 goto out_release;
1048         }
1049
1050         err = 0;
1051         if (buf->sa_family == AF_INET6) {
1052                 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf;
1053                 sin6->sin6_scope_id = 0;
1054         }
1055         dprintk("RPC:       %s succeeded\n", __func__);
1056
1057 out_release:
1058         sock_release(sock);
1059 out:
1060         return err;
1061 }
1062
1063 /*
1064  * Scraping a connected socket failed, so we don't have a useable
1065  * local address.  Fallback: generate an address that will prevent
1066  * the server from calling us back.
1067  *
1068  * Returns zero and fills in "buf" if successful; otherwise, a
1069  * negative errno is returned.
1070  */
1071 static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen)
1072 {
1073         switch (family) {
1074         case AF_INET:
1075                 if (buflen < sizeof(rpc_inaddr_loopback))
1076                         return -EINVAL;
1077                 memcpy(buf, &rpc_inaddr_loopback,
1078                                 sizeof(rpc_inaddr_loopback));
1079                 break;
1080         case AF_INET6:
1081                 if (buflen < sizeof(rpc_in6addr_loopback))
1082                         return -EINVAL;
1083                 memcpy(buf, &rpc_in6addr_loopback,
1084                                 sizeof(rpc_in6addr_loopback));
1085         default:
1086                 dprintk("RPC:       %s: address family not supported\n",
1087                         __func__);
1088                 return -EAFNOSUPPORT;
1089         }
1090         dprintk("RPC:       %s: succeeded\n", __func__);
1091         return 0;
1092 }
1093
1094 /**
1095  * rpc_localaddr - discover local endpoint address for an RPC client
1096  * @clnt: RPC client structure
1097  * @buf: target buffer
1098  * @buflen: size of target buffer, in bytes
1099  *
1100  * Returns zero and fills in "buf" and "buflen" if successful;
1101  * otherwise, a negative errno is returned.
1102  *
1103  * This works even if the underlying transport is not currently connected,
1104  * or if the upper layer never previously provided a source address.
1105  *
1106  * The result of this function call is transient: multiple calls in
1107  * succession may give different results, depending on how local
1108  * networking configuration changes over time.
1109  */
1110 int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
1111 {
1112         struct sockaddr_storage address;
1113         struct sockaddr *sap = (struct sockaddr *)&address;
1114         struct rpc_xprt *xprt;
1115         struct net *net;
1116         size_t salen;
1117         int err;
1118
1119         rcu_read_lock();
1120         xprt = rcu_dereference(clnt->cl_xprt);
1121         salen = xprt->addrlen;
1122         memcpy(sap, &xprt->addr, salen);
1123         net = get_net(xprt->xprt_net);
1124         rcu_read_unlock();
1125
1126         rpc_set_port(sap, 0);
1127         err = rpc_sockname(net, sap, salen, buf, buflen);
1128         put_net(net);
1129         if (err != 0)
1130                 /* Couldn't discover local address, return ANYADDR */
1131                 return rpc_anyaddr(sap->sa_family, buf, buflen);
1132         return 0;
1133 }
1134 EXPORT_SYMBOL_GPL(rpc_localaddr);
1135
1136 void
1137 rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
1138 {
1139         struct rpc_xprt *xprt;
1140
1141         rcu_read_lock();
1142         xprt = rcu_dereference(clnt->cl_xprt);
1143         if (xprt->ops->set_buffer_size)
1144                 xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
1145         rcu_read_unlock();
1146 }
1147 EXPORT_SYMBOL_GPL(rpc_setbufsize);
1148
1149 /**
1150  * rpc_protocol - Get transport protocol number for an RPC client
1151  * @clnt: RPC client to query
1152  *
1153  */
1154 int rpc_protocol(struct rpc_clnt *clnt)
1155 {
1156         int protocol;
1157
1158         rcu_read_lock();
1159         protocol = rcu_dereference(clnt->cl_xprt)->prot;
1160         rcu_read_unlock();
1161         return protocol;
1162 }
1163 EXPORT_SYMBOL_GPL(rpc_protocol);
1164
1165 /**
1166  * rpc_net_ns - Get the network namespace for this RPC client
1167  * @clnt: RPC client to query
1168  *
1169  */
1170 struct net *rpc_net_ns(struct rpc_clnt *clnt)
1171 {
1172         struct net *ret;
1173
1174         rcu_read_lock();
1175         ret = rcu_dereference(clnt->cl_xprt)->xprt_net;
1176         rcu_read_unlock();
1177         return ret;
1178 }
1179 EXPORT_SYMBOL_GPL(rpc_net_ns);
1180
1181 /**
1182  * rpc_max_payload - Get maximum payload size for a transport, in bytes
1183  * @clnt: RPC client to query
1184  *
1185  * For stream transports, this is one RPC record fragment (see RFC
1186  * 1831), as we don't support multi-record requests yet.  For datagram
1187  * transports, this is the size of an IP packet minus the IP, UDP, and
1188  * RPC header sizes.
1189  */
1190 size_t rpc_max_payload(struct rpc_clnt *clnt)
1191 {
1192         size_t ret;
1193
1194         rcu_read_lock();
1195         ret = rcu_dereference(clnt->cl_xprt)->max_payload;
1196         rcu_read_unlock();
1197         return ret;
1198 }
1199 EXPORT_SYMBOL_GPL(rpc_max_payload);
1200
1201 /**
1202  * rpc_get_timeout - Get timeout for transport in units of HZ
1203  * @clnt: RPC client to query
1204  */
1205 unsigned long rpc_get_timeout(struct rpc_clnt *clnt)
1206 {
1207         unsigned long ret;
1208
1209         rcu_read_lock();
1210         ret = rcu_dereference(clnt->cl_xprt)->timeout->to_initval;
1211         rcu_read_unlock();
1212         return ret;
1213 }
1214 EXPORT_SYMBOL_GPL(rpc_get_timeout);
1215
1216 /**
1217  * rpc_force_rebind - force transport to check that remote port is unchanged
1218  * @clnt: client to rebind
1219  *
1220  */
1221 void rpc_force_rebind(struct rpc_clnt *clnt)
1222 {
1223         if (clnt->cl_autobind) {
1224                 rcu_read_lock();
1225                 xprt_clear_bound(rcu_dereference(clnt->cl_xprt));
1226                 rcu_read_unlock();
1227         }
1228 }
1229 EXPORT_SYMBOL_GPL(rpc_force_rebind);
1230
1231 /*
1232  * Restart an (async) RPC call from the call_prepare state.
1233  * Usually called from within the exit handler.
1234  */
1235 int
1236 rpc_restart_call_prepare(struct rpc_task *task)
1237 {
1238         if (RPC_ASSASSINATED(task))
1239                 return 0;
1240         task->tk_action = call_start;
1241         if (task->tk_ops->rpc_call_prepare != NULL)
1242                 task->tk_action = rpc_prepare_task;
1243         return 1;
1244 }
1245 EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
1246
1247 /*
1248  * Restart an (async) RPC call. Usually called from within the
1249  * exit handler.
1250  */
1251 int
1252 rpc_restart_call(struct rpc_task *task)
1253 {
1254         if (RPC_ASSASSINATED(task))
1255                 return 0;
1256         task->tk_action = call_start;
1257         return 1;
1258 }
1259 EXPORT_SYMBOL_GPL(rpc_restart_call);
1260
1261 #ifdef RPC_DEBUG
1262 static const char *rpc_proc_name(const struct rpc_task *task)
1263 {
1264         const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1265
1266         if (proc) {
1267                 if (proc->p_name)
1268                         return proc->p_name;
1269                 else
1270                         return "NULL";
1271         } else
1272                 return "no proc";
1273 }
1274 #endif
1275
1276 /*
1277  * 0.  Initial state
1278  *
1279  *     Other FSM states can be visited zero or more times, but
1280  *     this state is visited exactly once for each RPC.
1281  */
1282 static void
1283 call_start(struct rpc_task *task)
1284 {
1285         struct rpc_clnt *clnt = task->tk_client;
1286
1287         dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
1288                         clnt->cl_protname, clnt->cl_vers,
1289                         rpc_proc_name(task),
1290                         (RPC_IS_ASYNC(task) ? "async" : "sync"));
1291
1292         /* Increment call count */
1293         task->tk_msg.rpc_proc->p_count++;
1294         clnt->cl_stats->rpccnt++;
1295         task->tk_action = call_reserve;
1296 }
1297
1298 /*
1299  * 1.   Reserve an RPC call slot
1300  */
1301 static void
1302 call_reserve(struct rpc_task *task)
1303 {
1304         dprint_status(task);
1305
1306         task->tk_status  = 0;
1307         task->tk_action  = call_reserveresult;
1308         xprt_reserve(task);
1309 }
1310
1311 static void call_retry_reserve(struct rpc_task *task);
1312
1313 /*
1314  * 1b.  Grok the result of xprt_reserve()
1315  */
1316 static void
1317 call_reserveresult(struct rpc_task *task)
1318 {
1319         int status = task->tk_status;
1320
1321         dprint_status(task);
1322
1323         /*
1324          * After a call to xprt_reserve(), we must have either
1325          * a request slot or else an error status.
1326          */
1327         task->tk_status = 0;
1328         if (status >= 0) {
1329                 if (task->tk_rqstp) {
1330                         task->tk_action = call_refresh;
1331                         return;
1332                 }
1333
1334                 printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
1335                                 __func__, status);
1336                 rpc_exit(task, -EIO);
1337                 return;
1338         }
1339
1340         /*
1341          * Even though there was an error, we may have acquired
1342          * a request slot somehow.  Make sure not to leak it.
1343          */
1344         if (task->tk_rqstp) {
1345                 printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
1346                                 __func__, status);
1347                 xprt_release(task);
1348         }
1349
1350         switch (status) {
1351         case -ENOMEM:
1352                 rpc_delay(task, HZ >> 2);
1353         case -EAGAIN:   /* woken up; retry */
1354                 task->tk_action = call_retry_reserve;
1355                 return;
1356         case -EIO:      /* probably a shutdown */
1357                 break;
1358         default:
1359                 printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
1360                                 __func__, status);
1361                 break;
1362         }
1363         rpc_exit(task, status);
1364 }
1365
1366 /*
1367  * 1c.  Retry reserving an RPC call slot
1368  */
1369 static void
1370 call_retry_reserve(struct rpc_task *task)
1371 {
1372         dprint_status(task);
1373
1374         task->tk_status  = 0;
1375         task->tk_action  = call_reserveresult;
1376         xprt_retry_reserve(task);
1377 }
1378
1379 /*
1380  * 2.   Bind and/or refresh the credentials
1381  */
1382 static void
1383 call_refresh(struct rpc_task *task)
1384 {
1385         dprint_status(task);
1386
1387         task->tk_action = call_refreshresult;
1388         task->tk_status = 0;
1389         task->tk_client->cl_stats->rpcauthrefresh++;
1390         rpcauth_refreshcred(task);
1391 }
1392
1393 /*
1394  * 2a.  Process the results of a credential refresh
1395  */
1396 static void
1397 call_refreshresult(struct rpc_task *task)
1398 {
1399         int status = task->tk_status;
1400
1401         dprint_status(task);
1402
1403         task->tk_status = 0;
1404         task->tk_action = call_refresh;
1405         switch (status) {
1406         case 0:
1407                 if (rpcauth_uptodatecred(task))
1408                         task->tk_action = call_allocate;
1409                 return;
1410         case -ETIMEDOUT:
1411                 rpc_delay(task, 3*HZ);
1412         case -EKEYEXPIRED:
1413         case -EAGAIN:
1414                 status = -EACCES;
1415                 if (!task->tk_cred_retry)
1416                         break;
1417                 task->tk_cred_retry--;
1418                 dprintk("RPC: %5u %s: retry refresh creds\n",
1419                                 task->tk_pid, __func__);
1420                 return;
1421         }
1422         dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
1423                                 task->tk_pid, __func__, status);
1424         rpc_exit(task, status);
1425 }
1426
1427 /*
1428  * 2b.  Allocate the buffer. For details, see sched.c:rpc_malloc.
1429  *      (Note: buffer memory is freed in xprt_release).
1430  */
1431 static void
1432 call_allocate(struct rpc_task *task)
1433 {
1434         unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
1435         struct rpc_rqst *req = task->tk_rqstp;
1436         struct rpc_xprt *xprt = req->rq_xprt;
1437         struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1438
1439         dprint_status(task);
1440
1441         task->tk_status = 0;
1442         task->tk_action = call_bind;
1443
1444         if (req->rq_buffer)
1445                 return;
1446
1447         if (proc->p_proc != 0) {
1448                 BUG_ON(proc->p_arglen == 0);
1449                 if (proc->p_decode != NULL)
1450                         BUG_ON(proc->p_replen == 0);
1451         }
1452
1453         /*
1454          * Calculate the size (in quads) of the RPC call
1455          * and reply headers, and convert both values
1456          * to byte sizes.
1457          */
1458         req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
1459         req->rq_callsize <<= 2;
1460         req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
1461         req->rq_rcvsize <<= 2;
1462
1463         req->rq_buffer = xprt->ops->buf_alloc(task,
1464                                         req->rq_callsize + req->rq_rcvsize);
1465         if (req->rq_buffer != NULL)
1466                 return;
1467
1468         dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
1469
1470         if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
1471                 task->tk_action = call_allocate;
1472                 rpc_delay(task, HZ>>4);
1473                 return;
1474         }
1475
1476         rpc_exit(task, -ERESTARTSYS);
1477 }
1478
1479 static inline int
1480 rpc_task_need_encode(struct rpc_task *task)
1481 {
1482         return task->tk_rqstp->rq_snd_buf.len == 0;
1483 }
1484
1485 static inline void
1486 rpc_task_force_reencode(struct rpc_task *task)
1487 {
1488         task->tk_rqstp->rq_snd_buf.len = 0;
1489         task->tk_rqstp->rq_bytes_sent = 0;
1490 }
1491
1492 static inline void
1493 rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
1494 {
1495         buf->head[0].iov_base = start;
1496         buf->head[0].iov_len = len;
1497         buf->tail[0].iov_len = 0;
1498         buf->page_len = 0;
1499         buf->flags = 0;
1500         buf->len = 0;
1501         buf->buflen = len;
1502 }
1503
1504 /*
1505  * 3.   Encode arguments of an RPC call
1506  */
1507 static void
1508 rpc_xdr_encode(struct rpc_task *task)
1509 {
1510         struct rpc_rqst *req = task->tk_rqstp;
1511         kxdreproc_t     encode;
1512         __be32          *p;
1513
1514         dprint_status(task);
1515
1516         rpc_xdr_buf_init(&req->rq_snd_buf,
1517                          req->rq_buffer,
1518                          req->rq_callsize);
1519         rpc_xdr_buf_init(&req->rq_rcv_buf,
1520                          (char *)req->rq_buffer + req->rq_callsize,
1521                          req->rq_rcvsize);
1522
1523         p = rpc_encode_header(task);
1524         if (p == NULL) {
1525                 printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
1526                 rpc_exit(task, -EIO);
1527                 return;
1528         }
1529
1530         encode = task->tk_msg.rpc_proc->p_encode;
1531         if (encode == NULL)
1532                 return;
1533
1534         task->tk_status = rpcauth_wrap_req(task, encode, req, p,
1535                         task->tk_msg.rpc_argp);
1536 }
1537
1538 /*
1539  * 4.   Get the server port number if not yet set
1540  */
1541 static void
1542 call_bind(struct rpc_task *task)
1543 {
1544         struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1545
1546         dprint_status(task);
1547
1548         task->tk_action = call_connect;
1549         if (!xprt_bound(xprt)) {
1550                 task->tk_action = call_bind_status;
1551                 task->tk_timeout = xprt->bind_timeout;
1552                 xprt->ops->rpcbind(task);
1553         }
1554 }
1555
1556 /*
1557  * 4a.  Sort out bind result
1558  */
1559 static void
1560 call_bind_status(struct rpc_task *task)
1561 {
1562         int status = -EIO;
1563
1564         if (task->tk_status >= 0) {
1565                 dprint_status(task);
1566                 task->tk_status = 0;
1567                 task->tk_action = call_connect;
1568                 return;
1569         }
1570
1571         trace_rpc_bind_status(task);
1572         switch (task->tk_status) {
1573         case -ENOMEM:
1574                 dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
1575                 rpc_delay(task, HZ >> 2);
1576                 goto retry_timeout;
1577         case -EACCES:
1578                 dprintk("RPC: %5u remote rpcbind: RPC program/version "
1579                                 "unavailable\n", task->tk_pid);
1580                 /* fail immediately if this is an RPC ping */
1581                 if (task->tk_msg.rpc_proc->p_proc == 0) {
1582                         status = -EOPNOTSUPP;
1583                         break;
1584                 }
1585                 if (task->tk_rebind_retry == 0)
1586                         break;
1587                 task->tk_rebind_retry--;
1588                 rpc_delay(task, 3*HZ);
1589                 goto retry_timeout;
1590         case -ETIMEDOUT:
1591                 dprintk("RPC: %5u rpcbind request timed out\n",
1592                                 task->tk_pid);
1593                 goto retry_timeout;
1594         case -EPFNOSUPPORT:
1595                 /* server doesn't support any rpcbind version we know of */
1596                 dprintk("RPC: %5u unrecognized remote rpcbind service\n",
1597                                 task->tk_pid);
1598                 break;
1599         case -EPROTONOSUPPORT:
1600                 dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
1601                                 task->tk_pid);
1602                 task->tk_status = 0;
1603                 task->tk_action = call_bind;
1604                 return;
1605         case -ECONNREFUSED:             /* connection problems */
1606         case -ECONNRESET:
1607         case -ENOTCONN:
1608         case -EHOSTDOWN:
1609         case -EHOSTUNREACH:
1610         case -ENETUNREACH:
1611         case -EPIPE:
1612                 dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
1613                                 task->tk_pid, task->tk_status);
1614                 if (!RPC_IS_SOFTCONN(task)) {
1615                         rpc_delay(task, 5*HZ);
1616                         goto retry_timeout;
1617                 }
1618                 status = task->tk_status;
1619                 break;
1620         default:
1621                 dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
1622                                 task->tk_pid, -task->tk_status);
1623         }
1624
1625         rpc_exit(task, status);
1626         return;
1627
1628 retry_timeout:
1629         task->tk_action = call_timeout;
1630 }
1631
1632 /*
1633  * 4b.  Connect to the RPC server
1634  */
1635 static void
1636 call_connect(struct rpc_task *task)
1637 {
1638         struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1639
1640         dprintk("RPC: %5u call_connect xprt %p %s connected\n",
1641                         task->tk_pid, xprt,
1642                         (xprt_connected(xprt) ? "is" : "is not"));
1643
1644         task->tk_action = call_transmit;
1645         if (!xprt_connected(xprt)) {
1646                 task->tk_action = call_connect_status;
1647                 if (task->tk_status < 0)
1648                         return;
1649                 xprt_connect(task);
1650         }
1651 }
1652
1653 /*
1654  * 4c.  Sort out connect result
1655  */
1656 static void
1657 call_connect_status(struct rpc_task *task)
1658 {
1659         struct rpc_clnt *clnt = task->tk_client;
1660         int status = task->tk_status;
1661
1662         dprint_status(task);
1663
1664         trace_rpc_connect_status(task, status);
1665         switch (status) {
1666                 /* if soft mounted, test if we've timed out */
1667         case -ETIMEDOUT:
1668                 task->tk_action = call_timeout;
1669                 return;
1670         case -ECONNREFUSED:
1671         case -ECONNRESET:
1672         case -ENETUNREACH:
1673                 if (RPC_IS_SOFTCONN(task))
1674                         break;
1675                 /* retry with existing socket, after a delay */
1676         case 0:
1677         case -EAGAIN:
1678                 task->tk_status = 0;
1679                 clnt->cl_stats->netreconn++;
1680                 task->tk_action = call_transmit;
1681                 return;
1682         }
1683         rpc_exit(task, status);
1684 }
1685
1686 /*
1687  * 5.   Transmit the RPC request, and wait for reply
1688  */
1689 static void
1690 call_transmit(struct rpc_task *task)
1691 {
1692         dprint_status(task);
1693
1694         task->tk_action = call_status;
1695         if (task->tk_status < 0)
1696                 return;
1697         task->tk_status = xprt_prepare_transmit(task);
1698         if (task->tk_status != 0)
1699                 return;
1700         task->tk_action = call_transmit_status;
1701         /* Encode here so that rpcsec_gss can use correct sequence number. */
1702         if (rpc_task_need_encode(task)) {
1703                 rpc_xdr_encode(task);
1704                 /* Did the encode result in an error condition? */
1705                 if (task->tk_status != 0) {
1706                         /* Was the error nonfatal? */
1707                         if (task->tk_status == -EAGAIN)
1708                                 rpc_delay(task, HZ >> 4);
1709                         else
1710                                 rpc_exit(task, task->tk_status);
1711                         return;
1712                 }
1713         }
1714         xprt_transmit(task);
1715         if (task->tk_status < 0)
1716                 return;
1717         /*
1718          * On success, ensure that we call xprt_end_transmit() before sleeping
1719          * in order to allow access to the socket to other RPC requests.
1720          */
1721         call_transmit_status(task);
1722         if (rpc_reply_expected(task))
1723                 return;
1724         task->tk_action = rpc_exit_task;
1725         rpc_wake_up_queued_task(&task->tk_rqstp->rq_xprt->pending, task);
1726 }
1727
1728 /*
1729  * 5a.  Handle cleanup after a transmission
1730  */
1731 static void
1732 call_transmit_status(struct rpc_task *task)
1733 {
1734         task->tk_action = call_status;
1735
1736         /*
1737          * Common case: success.  Force the compiler to put this
1738          * test first.
1739          */
1740         if (task->tk_status == 0) {
1741                 xprt_end_transmit(task);
1742                 rpc_task_force_reencode(task);
1743                 return;
1744         }
1745
1746         switch (task->tk_status) {
1747         case -EAGAIN:
1748                 break;
1749         default:
1750                 dprint_status(task);
1751                 xprt_end_transmit(task);
1752                 rpc_task_force_reencode(task);
1753                 break;
1754                 /*
1755                  * Special cases: if we've been waiting on the
1756                  * socket's write_space() callback, or if the
1757                  * socket just returned a connection error,
1758                  * then hold onto the transport lock.
1759                  */
1760         case -ECONNREFUSED:
1761         case -EHOSTDOWN:
1762         case -EHOSTUNREACH:
1763         case -ENETUNREACH:
1764                 if (RPC_IS_SOFTCONN(task)) {
1765                         xprt_end_transmit(task);
1766                         rpc_exit(task, task->tk_status);
1767                         break;
1768                 }
1769         case -ECONNRESET:
1770         case -ENOTCONN:
1771         case -EPIPE:
1772                 rpc_task_force_reencode(task);
1773         }
1774 }
1775
1776 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1777 /*
1778  * 5b.  Send the backchannel RPC reply.  On error, drop the reply.  In
1779  * addition, disconnect on connectivity errors.
1780  */
1781 static void
1782 call_bc_transmit(struct rpc_task *task)
1783 {
1784         struct rpc_rqst *req = task->tk_rqstp;
1785
1786         task->tk_status = xprt_prepare_transmit(task);
1787         if (task->tk_status == -EAGAIN) {
1788                 /*
1789                  * Could not reserve the transport. Try again after the
1790                  * transport is released.
1791                  */
1792                 task->tk_status = 0;
1793                 task->tk_action = call_bc_transmit;
1794                 return;
1795         }
1796
1797         task->tk_action = rpc_exit_task;
1798         if (task->tk_status < 0) {
1799                 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1800                         "error: %d\n", task->tk_status);
1801                 return;
1802         }
1803
1804         xprt_transmit(task);
1805         xprt_end_transmit(task);
1806         dprint_status(task);
1807         switch (task->tk_status) {
1808         case 0:
1809                 /* Success */
1810                 break;
1811         case -EHOSTDOWN:
1812         case -EHOSTUNREACH:
1813         case -ENETUNREACH:
1814         case -ETIMEDOUT:
1815                 /*
1816                  * Problem reaching the server.  Disconnect and let the
1817                  * forechannel reestablish the connection.  The server will
1818                  * have to retransmit the backchannel request and we'll
1819                  * reprocess it.  Since these ops are idempotent, there's no
1820                  * need to cache our reply at this time.
1821                  */
1822                 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1823                         "error: %d\n", task->tk_status);
1824                 xprt_conditional_disconnect(req->rq_xprt,
1825                         req->rq_connect_cookie);
1826                 break;
1827         default:
1828                 /*
1829                  * We were unable to reply and will have to drop the
1830                  * request.  The server should reconnect and retransmit.
1831                  */
1832                 WARN_ON_ONCE(task->tk_status == -EAGAIN);
1833                 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1834                         "error: %d\n", task->tk_status);
1835                 break;
1836         }
1837         rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
1838 }
1839 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
1840
1841 /*
1842  * 6.   Sort out the RPC call status
1843  */
1844 static void
1845 call_status(struct rpc_task *task)
1846 {
1847         struct rpc_clnt *clnt = task->tk_client;
1848         struct rpc_rqst *req = task->tk_rqstp;
1849         int             status;
1850
1851         if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
1852                 task->tk_status = req->rq_reply_bytes_recvd;
1853
1854         dprint_status(task);
1855
1856         status = task->tk_status;
1857         if (status >= 0) {
1858                 task->tk_action = call_decode;
1859                 return;
1860         }
1861
1862         trace_rpc_call_status(task);
1863         task->tk_status = 0;
1864         switch(status) {
1865         case -EHOSTDOWN:
1866         case -EHOSTUNREACH:
1867         case -ENETUNREACH:
1868                 /*
1869                  * Delay any retries for 3 seconds, then handle as if it
1870                  * were a timeout.
1871                  */
1872                 rpc_delay(task, 3*HZ);
1873         case -ETIMEDOUT:
1874                 task->tk_action = call_timeout;
1875                 if (task->tk_client->cl_discrtry)
1876                         xprt_conditional_disconnect(req->rq_xprt,
1877                                         req->rq_connect_cookie);
1878                 break;
1879         case -ECONNRESET:
1880         case -ECONNREFUSED:
1881                 rpc_force_rebind(clnt);
1882                 rpc_delay(task, 3*HZ);
1883         case -EPIPE:
1884         case -ENOTCONN:
1885                 task->tk_action = call_bind;
1886                 break;
1887         case -EAGAIN:
1888                 task->tk_action = call_transmit;
1889                 break;
1890         case -EIO:
1891                 /* shutdown or soft timeout */
1892                 rpc_exit(task, status);
1893                 break;
1894         default:
1895                 if (clnt->cl_chatty)
1896                         printk("%s: RPC call returned error %d\n",
1897                                clnt->cl_protname, -status);
1898                 rpc_exit(task, status);
1899         }
1900 }
1901
1902 /*
1903  * 6a.  Handle RPC timeout
1904  *      We do not release the request slot, so we keep using the
1905  *      same XID for all retransmits.
1906  */
1907 static void
1908 call_timeout(struct rpc_task *task)
1909 {
1910         struct rpc_clnt *clnt = task->tk_client;
1911
1912         if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
1913                 dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
1914                 goto retry;
1915         }
1916
1917         dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
1918         task->tk_timeouts++;
1919
1920         if (RPC_IS_SOFTCONN(task)) {
1921                 rpc_exit(task, -ETIMEDOUT);
1922                 return;
1923         }
1924         if (RPC_IS_SOFT(task)) {
1925                 if (clnt->cl_chatty) {
1926                         rcu_read_lock();
1927                         printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
1928                                 clnt->cl_protname,
1929                                 rcu_dereference(clnt->cl_xprt)->servername);
1930                         rcu_read_unlock();
1931                 }
1932                 if (task->tk_flags & RPC_TASK_TIMEOUT)
1933                         rpc_exit(task, -ETIMEDOUT);
1934                 else
1935                         rpc_exit(task, -EIO);
1936                 return;
1937         }
1938
1939         if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
1940                 task->tk_flags |= RPC_CALL_MAJORSEEN;
1941                 if (clnt->cl_chatty) {
1942                         rcu_read_lock();
1943                         printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
1944                         clnt->cl_protname,
1945                         rcu_dereference(clnt->cl_xprt)->servername);
1946                         rcu_read_unlock();
1947                 }
1948         }
1949         rpc_force_rebind(clnt);
1950         /*
1951          * Did our request time out due to an RPCSEC_GSS out-of-sequence
1952          * event? RFC2203 requires the server to drop all such requests.
1953          */
1954         rpcauth_invalcred(task);
1955
1956 retry:
1957         clnt->cl_stats->rpcretrans++;
1958         task->tk_action = call_bind;
1959         task->tk_status = 0;
1960 }
1961
1962 /*
1963  * 7.   Decode the RPC reply
1964  */
1965 static void
1966 call_decode(struct rpc_task *task)
1967 {
1968         struct rpc_clnt *clnt = task->tk_client;
1969         struct rpc_rqst *req = task->tk_rqstp;
1970         kxdrdproc_t     decode = task->tk_msg.rpc_proc->p_decode;
1971         __be32          *p;
1972
1973         dprint_status(task);
1974
1975         if (task->tk_flags & RPC_CALL_MAJORSEEN) {
1976                 if (clnt->cl_chatty) {
1977                         rcu_read_lock();
1978                         printk(KERN_NOTICE "%s: server %s OK\n",
1979                                 clnt->cl_protname,
1980                                 rcu_dereference(clnt->cl_xprt)->servername);
1981                         rcu_read_unlock();
1982                 }
1983                 task->tk_flags &= ~RPC_CALL_MAJORSEEN;
1984         }
1985
1986         /*
1987          * Ensure that we see all writes made by xprt_complete_rqst()
1988          * before it changed req->rq_reply_bytes_recvd.
1989          */
1990         smp_rmb();
1991         req->rq_rcv_buf.len = req->rq_private_buf.len;
1992
1993         /* Check that the softirq receive buffer is valid */
1994         WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
1995                                 sizeof(req->rq_rcv_buf)) != 0);
1996
1997         if (req->rq_rcv_buf.len < 12) {
1998                 if (!RPC_IS_SOFT(task)) {
1999                         task->tk_action = call_bind;
2000                         clnt->cl_stats->rpcretrans++;
2001                         goto out_retry;
2002                 }
2003                 dprintk("RPC:       %s: too small RPC reply size (%d bytes)\n",
2004                                 clnt->cl_protname, task->tk_status);
2005                 task->tk_action = call_timeout;
2006                 goto out_retry;
2007         }
2008
2009         p = rpc_verify_header(task);
2010         if (IS_ERR(p)) {
2011                 if (p == ERR_PTR(-EAGAIN))
2012                         goto out_retry;
2013                 return;
2014         }
2015
2016         task->tk_action = rpc_exit_task;
2017
2018         if (decode) {
2019                 task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
2020                                                       task->tk_msg.rpc_resp);
2021         }
2022         dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
2023                         task->tk_status);
2024         return;
2025 out_retry:
2026         task->tk_status = 0;
2027         /* Note: rpc_verify_header() may have freed the RPC slot */
2028         if (task->tk_rqstp == req) {
2029                 req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
2030                 if (task->tk_client->cl_discrtry)
2031                         xprt_conditional_disconnect(req->rq_xprt,
2032                                         req->rq_connect_cookie);
2033         }
2034 }
2035
2036 static __be32 *
2037 rpc_encode_header(struct rpc_task *task)
2038 {
2039         struct rpc_clnt *clnt = task->tk_client;
2040         struct rpc_rqst *req = task->tk_rqstp;
2041         __be32          *p = req->rq_svec[0].iov_base;
2042
2043         /* FIXME: check buffer size? */
2044
2045         p = xprt_skip_transport_header(req->rq_xprt, p);
2046         *p++ = req->rq_xid;             /* XID */
2047         *p++ = htonl(RPC_CALL);         /* CALL */
2048         *p++ = htonl(RPC_VERSION);      /* RPC version */
2049         *p++ = htonl(clnt->cl_prog);    /* program number */
2050         *p++ = htonl(clnt->cl_vers);    /* program version */
2051         *p++ = htonl(task->tk_msg.rpc_proc->p_proc);    /* procedure */
2052         p = rpcauth_marshcred(task, p);
2053         req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
2054         return p;
2055 }
2056
2057 static __be32 *
2058 rpc_verify_header(struct rpc_task *task)
2059 {
2060         struct rpc_clnt *clnt = task->tk_client;
2061         struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
2062         int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
2063         __be32  *p = iov->iov_base;
2064         u32 n;
2065         int error = -EACCES;
2066
2067         if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
2068                 /* RFC-1014 says that the representation of XDR data must be a
2069                  * multiple of four bytes
2070                  * - if it isn't pointer subtraction in the NFS client may give
2071                  *   undefined results
2072                  */
2073                 dprintk("RPC: %5u %s: XDR representation not a multiple of"
2074                        " 4 bytes: 0x%x\n", task->tk_pid, __func__,
2075                        task->tk_rqstp->rq_rcv_buf.len);
2076                 goto out_eio;
2077         }
2078         if ((len -= 3) < 0)
2079                 goto out_overflow;
2080
2081         p += 1; /* skip XID */
2082         if ((n = ntohl(*p++)) != RPC_REPLY) {
2083                 dprintk("RPC: %5u %s: not an RPC reply: %x\n",
2084                         task->tk_pid, __func__, n);
2085                 goto out_garbage;
2086         }
2087
2088         if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
2089                 if (--len < 0)
2090                         goto out_overflow;
2091                 switch ((n = ntohl(*p++))) {
2092                 case RPC_AUTH_ERROR:
2093                         break;
2094                 case RPC_MISMATCH:
2095                         dprintk("RPC: %5u %s: RPC call version mismatch!\n",
2096                                 task->tk_pid, __func__);
2097                         error = -EPROTONOSUPPORT;
2098                         goto out_err;
2099                 default:
2100                         dprintk("RPC: %5u %s: RPC call rejected, "
2101                                 "unknown error: %x\n",
2102                                 task->tk_pid, __func__, n);
2103                         goto out_eio;
2104                 }
2105                 if (--len < 0)
2106                         goto out_overflow;
2107                 switch ((n = ntohl(*p++))) {
2108                 case RPC_AUTH_REJECTEDCRED:
2109                 case RPC_AUTH_REJECTEDVERF:
2110                 case RPCSEC_GSS_CREDPROBLEM:
2111                 case RPCSEC_GSS_CTXPROBLEM:
2112                         if (!task->tk_cred_retry)
2113                                 break;
2114                         task->tk_cred_retry--;
2115                         dprintk("RPC: %5u %s: retry stale creds\n",
2116                                         task->tk_pid, __func__);
2117                         rpcauth_invalcred(task);
2118                         /* Ensure we obtain a new XID! */
2119                         xprt_release(task);
2120                         task->tk_action = call_reserve;
2121                         goto out_retry;
2122                 case RPC_AUTH_BADCRED:
2123                 case RPC_AUTH_BADVERF:
2124                         /* possibly garbled cred/verf? */
2125                         if (!task->tk_garb_retry)
2126                                 break;
2127                         task->tk_garb_retry--;
2128                         dprintk("RPC: %5u %s: retry garbled creds\n",
2129                                         task->tk_pid, __func__);
2130                         task->tk_action = call_bind;
2131                         goto out_retry;
2132                 case RPC_AUTH_TOOWEAK:
2133                         rcu_read_lock();
2134                         printk(KERN_NOTICE "RPC: server %s requires stronger "
2135                                "authentication.\n",
2136                                rcu_dereference(clnt->cl_xprt)->servername);
2137                         rcu_read_unlock();
2138                         break;
2139                 default:
2140                         dprintk("RPC: %5u %s: unknown auth error: %x\n",
2141                                         task->tk_pid, __func__, n);
2142                         error = -EIO;
2143                 }
2144                 dprintk("RPC: %5u %s: call rejected %d\n",
2145                                 task->tk_pid, __func__, n);
2146                 goto out_err;
2147         }
2148         if (!(p = rpcauth_checkverf(task, p))) {
2149                 dprintk("RPC: %5u %s: auth check failed\n",
2150                                 task->tk_pid, __func__);
2151                 goto out_garbage;               /* bad verifier, retry */
2152         }
2153         len = p - (__be32 *)iov->iov_base - 1;
2154         if (len < 0)
2155                 goto out_overflow;
2156         switch ((n = ntohl(*p++))) {
2157         case RPC_SUCCESS:
2158                 return p;
2159         case RPC_PROG_UNAVAIL:
2160                 dprintk_rcu("RPC: %5u %s: program %u is unsupported "
2161                                 "by server %s\n", task->tk_pid, __func__,
2162                                 (unsigned int)clnt->cl_prog,
2163                                 rcu_dereference(clnt->cl_xprt)->servername);
2164                 error = -EPFNOSUPPORT;
2165                 goto out_err;
2166         case RPC_PROG_MISMATCH:
2167                 dprintk_rcu("RPC: %5u %s: program %u, version %u unsupported "
2168                                 "by server %s\n", task->tk_pid, __func__,
2169                                 (unsigned int)clnt->cl_prog,
2170                                 (unsigned int)clnt->cl_vers,
2171                                 rcu_dereference(clnt->cl_xprt)->servername);
2172                 error = -EPROTONOSUPPORT;
2173                 goto out_err;
2174         case RPC_PROC_UNAVAIL:
2175                 dprintk_rcu("RPC: %5u %s: proc %s unsupported by program %u, "
2176                                 "version %u on server %s\n",
2177                                 task->tk_pid, __func__,
2178                                 rpc_proc_name(task),
2179                                 clnt->cl_prog, clnt->cl_vers,
2180                                 rcu_dereference(clnt->cl_xprt)->servername);
2181                 error = -EOPNOTSUPP;
2182                 goto out_err;
2183         case RPC_GARBAGE_ARGS:
2184                 dprintk("RPC: %5u %s: server saw garbage\n",
2185                                 task->tk_pid, __func__);
2186                 break;                  /* retry */
2187         default:
2188                 dprintk("RPC: %5u %s: server accept status: %x\n",
2189                                 task->tk_pid, __func__, n);
2190                 /* Also retry */
2191         }
2192
2193 out_garbage:
2194         clnt->cl_stats->rpcgarbage++;
2195         if (task->tk_garb_retry) {
2196                 task->tk_garb_retry--;
2197                 dprintk("RPC: %5u %s: retrying\n",
2198                                 task->tk_pid, __func__);
2199                 task->tk_action = call_bind;
2200 out_retry:
2201                 return ERR_PTR(-EAGAIN);
2202         }
2203 out_eio:
2204         error = -EIO;
2205 out_err:
2206         rpc_exit(task, error);
2207         dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
2208                         __func__, error);
2209         return ERR_PTR(error);
2210 out_overflow:
2211         dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
2212                         __func__);
2213         goto out_garbage;
2214 }
2215
2216 static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
2217 {
2218 }
2219
2220 static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
2221 {
2222         return 0;
2223 }
2224
2225 static struct rpc_procinfo rpcproc_null = {
2226         .p_encode = rpcproc_encode_null,
2227         .p_decode = rpcproc_decode_null,
2228 };
2229
2230 static int rpc_ping(struct rpc_clnt *clnt)
2231 {
2232         struct rpc_message msg = {
2233                 .rpc_proc = &rpcproc_null,
2234         };
2235         int err;
2236         msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
2237         err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
2238         put_rpccred(msg.rpc_cred);
2239         return err;
2240 }
2241
2242 struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
2243 {
2244         struct rpc_message msg = {
2245                 .rpc_proc = &rpcproc_null,
2246                 .rpc_cred = cred,
2247         };
2248         struct rpc_task_setup task_setup_data = {
2249                 .rpc_client = clnt,
2250                 .rpc_message = &msg,
2251                 .callback_ops = &rpc_default_ops,
2252                 .flags = flags,
2253         };
2254         return rpc_run_task(&task_setup_data);
2255 }
2256 EXPORT_SYMBOL_GPL(rpc_call_null);
2257
2258 #ifdef RPC_DEBUG
2259 static void rpc_show_header(void)
2260 {
2261         printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
2262                 "-timeout ---ops--\n");
2263 }
2264
2265 static void rpc_show_task(const struct rpc_clnt *clnt,
2266                           const struct rpc_task *task)
2267 {
2268         const char *rpc_waitq = "none";
2269
2270         if (RPC_IS_QUEUED(task))
2271                 rpc_waitq = rpc_qname(task->tk_waitqueue);
2272
2273         printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
2274                 task->tk_pid, task->tk_flags, task->tk_status,
2275                 clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
2276                 clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
2277                 task->tk_action, rpc_waitq);
2278 }
2279
2280 void rpc_show_tasks(struct net *net)
2281 {
2282         struct rpc_clnt *clnt;
2283         struct rpc_task *task;
2284         int header = 0;
2285         struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
2286
2287         spin_lock(&sn->rpc_client_lock);
2288         list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
2289                 spin_lock(&clnt->cl_lock);
2290                 list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
2291                         if (!header) {
2292                                 rpc_show_header();
2293                                 header++;
2294                         }
2295                         rpc_show_task(clnt, task);
2296                 }
2297                 spin_unlock(&clnt->cl_lock);
2298         }
2299         spin_unlock(&sn->rpc_client_lock);
2300 }
2301 #endif