index 4d7f88cefada0c9c3edbe1265133adbfc1ef34dd..41088899783d4106140333014a722da531494838 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -99,6 +99,7 @@ struct sem {
                                        /* that alter the semaphore */
        struct list_head pending_const; /* pending single-sop operations */
                                        /* that do not alter the semaphore*/
+       time_t  sem_otime;      /* candidate for sem_otime */
 } ____cacheline_aligned_in_smp;
 
 /* One queue for each sleeping process in the system. */
@@ -153,12 +154,15 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
 #define SEMOPM_FAST    64  /* ~ 372 bytes on stack */
 
 /*
- * linked list protection:
+ * Locking:
  *     sem_undo.id_next,
+ *     sem_array.complex_count,
 *     sem_array.pending{_alter,_const},
- *     sem_array.sem_undo: sem_lock() for read/write
+ *     sem_array.sem_undo: global sem_lock() for read/write
  *     sem_undo.proc_next: only "current" is allowed to read/write that field.
  *     
+ *     sem_array.sem_base[i].pending_{const,alter}:
+ *             global or semaphore sem_lock() for read/write
  */
 
 #define sc_semmsl      sem_ctls[0]
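
As a rough illustration of the locking rules above, the following user-space sketch (the toy_* names are invented for this example; pthread mutexes stand in for the kernel's spinlocks) shows how the lock choice depends on the operation. The real sem_lock() additionally has to re-check for concurrently arriving complex operations after taking the per-semaphore lock.

#include <pthread.h>

/* Toy model of the two-level locking scheme; not kernel code. */
struct toy_sem {
	pthread_mutex_t lock;		/* models sem->lock              */
	int semval;
};

struct toy_sem_array {
	pthread_mutex_t global_lock;	/* models sma->sem_perm.lock     */
	struct toy_sem *sem_base;	/* models sma->sem_base[]        */
	int complex_count;		/* pending multi-sop operations  */
	int nsems;
};

/* Returns the "locknum": -1 for the global lock, else the semaphore index. */
static int toy_sem_lock(struct toy_sem_array *sma, int sem_num, int nsops)
{
	if (nsops != 1 || sma->complex_count) {
		/* Complex case: take the lock that protects the whole array. */
		pthread_mutex_lock(&sma->global_lock);
		return -1;
	}
	/* Simple case: only the per-semaphore lock is needed. */
	pthread_mutex_lock(&sma->sem_base[sem_num].lock);
	return sem_num;
}
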
@@ -192,6 +196,53 @@ void __init sem_init (void)
                                IPC_SEM_IDS, sysvipc_sem_proc_show);
 }
 
+/**
+ * unmerge_queues - unmerge queues, if possible.
+ * @sma: semaphore array
+ *
+ * The function unmerges the wait queues if complex_count is 0.
+ * It must be called prior to dropping the global semaphore array lock.
+ */
+static void unmerge_queues(struct sem_array *sma)
+{
+       struct sem_queue *q, *tq;
+
+       /* complex operations still around? */
+       if (sma->complex_count)
+               return;
+       /*
+        * We will switch back to simple mode.
+        * Move all pending operation back into the per-semaphore
+        * queues.
+        */
+       list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
+               struct sem *curr;
+               curr = &sma->sem_base[q->sops[0].sem_num];
+
+               list_add_tail(&q->list, &curr->pending_alter);
+       }
+       INIT_LIST_HEAD(&sma->pending_alter);
+}
+
+/**
+ * merge_queues - Merge single semop queues into global queue
+ * @sma: semaphore array
+ *
+ * This function merges all per-semaphore queues into the global queue.
+ * It is necessary to achieve FIFO ordering for the pending single-sop
+ * operations when a multi-semop operation must sleep.
+ * Only the alter operations must be moved; the const operations can stay.
+ */
+static void merge_queues(struct sem_array *sma)
+{
+       int i;
+       for (i = 0; i < sma->sem_nsems; i++) {
+               struct sem *sem = sma->sem_base + i;
+
+               list_splice_init(&sem->pending_alter, &sma->pending_alter);
+       }
+}
+
 /*
  * If the request contains only one semaphore operation, and there are
  * no complex transactions pending, lock only the semaphore involved.
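
From user space, the distinction that drives merge_queues()/unmerge_queues() is simply whether a single semop() call contains one operation or several. A minimal, runnable demonstration (error handling mostly omitted):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/types.h>

int main(void)
{
	struct sembuf single = { .sem_num = 0, .sem_op = 1, .sem_flg = 0 };
	struct sembuf complex_op[2] = {
		{ .sem_num = 0, .sem_op = -1, .sem_flg = 0 },
		{ .sem_num = 1, .sem_op = +1, .sem_flg = 0 },
	};
	int id = semget(IPC_PRIVATE, 2, IPC_CREAT | 0600);

	if (id < 0) {
		perror("semget");
		return 1;
	}

	/* One sop on one semaphore: eligible for the per-semaphore
	 * fast path and the per-semaphore pending lists. */
	semop(id, &single, 1);

	/* Several sops in one call: a "complex" operation. It needs the
	 * global lock and, while such operations are pending, all alter
	 * operations are kept on the global queue (merge_queues()). */
	semop(id, complex_op, 2);

	semctl(id, 0, IPC_RMID);
	return 0;
}
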
@@ -262,6 +313,7 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 static inline void sem_unlock(struct sem_array *sma, int locknum)
 {
        if (locknum == -1) {
+               unmerge_queues(sma);
                ipc_unlock_object(&sma->sem_perm);
        } else {
                struct sem *sem = sma->sem_base + locknum;
@@ -487,12 +539,19 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
        return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 }
 
-/*
- * Determine whether a sequence of semaphore operations would succeed
- * all at once. Return 0 if yes, 1 if need to sleep, else return error code.
+/** perform_atomic_semop - Perform (if possible) a semaphore operation
+ * @sma: semaphore array
+ * @sops: array with operations that should be checked
+ * @nsops: number of operations in @sops
+ * @un: undo array
+ * @pid: pid that did the change
+ *
+ * Returns 0 if the operation was possible.
+ * Returns 1 if the operation is impossible; the caller must sleep.
+ * Negative values are error codes.
  */
 
-static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
+static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
                             int nsops, struct sem_undo *un, int pid)
 {
        int result, sem_op;
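
The 0 / 1 / negative return convention documented above maps directly onto what user space observes when IPC_NOWAIT is used. A small sketch; the semid parameter is assumed to refer to an existing set whose semaphore 0 currently has the value 0:

#include <errno.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/types.h>

/* Sketch: the kernel's 0 / 1 / negative convention as seen via semop(). */
static int try_decrement(int semid)
{
	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = IPC_NOWAIT };

	if (semop(semid, &op, 1) == 0)
		return 0;	/* the operation was possible              */
	if (errno == EAGAIN)
		return 1;	/* it would have had to sleep (IPC_NOWAIT) */
	return -errno;		/* genuine error, e.g. -ERANGE or -EINVAL  */
}
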
@@ -675,8 +734,8 @@ static int wake_const_ops(struct sem_array *sma, int semnum,
                q = container_of(walk, struct sem_queue, list);
                walk = walk->next;
 
-               error = try_atomic_semop(sma, q->sops, q->nsops,
-                                               q->undo, q->pid);
+               error = perform_atomic_semop(sma, q->sops, q->nsops,
+                                                q->undo, q->pid);
 
                if (error <= 0) {
                        /* operation completed, remove from queue & wakeup */
@@ -789,7 +848,7 @@ again:
                if (semnum != -1 && sma->sem_base[semnum].semval == 0)
                        break;
 
-               error = try_atomic_semop(sma, q->sops, q->nsops,
+               error = perform_atomic_semop(sma, q->sops, q->nsops,
                                         q->undo, q->pid);
 
                /* Does q->sleeper still need to sleep? */
@@ -831,51 +890,46 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
                        int otime, struct list_head *pt)
 {
        int i;
-       int progress;
 
        otime |= do_smart_wakeup_zero(sma, sops, nsops, pt);
 
-       progress = 1;
-retry_global:
-       if (sma->complex_count) {
-               if (update_queue(sma, -1, pt)) {
-                       progress = 1;
-                       otime = 1;
-                       sops = NULL;
-               }
-       }
-       if (!progress)
-               goto done;
-
-       if (!sops) {
-               /* No semops; something special is going on. */
-               for (i = 0; i < sma->sem_nsems; i++) {
-                       if (update_queue(sma, i, pt)) {
-                               otime = 1;
-                               progress = 1;
+       if (!list_empty(&sma->pending_alter)) {
+               /* semaphore array uses the global queue - just process it. */
+               otime |= update_queue(sma, -1, pt);
+       } else {
+               if (!sops) {
+                       /*
+                        * No sops, thus the modified semaphores are not
+                        * known. Check all.
+                        */
+                       for (i = 0; i < sma->sem_nsems; i++)
+                               otime |= update_queue(sma, i, pt);
+               } else {
+                       /*
+                        * Check the semaphores that were increased:
+                        * - No complex ops, thus all sleeping ops are
+                        *   decrements.
+                        * - If we decreased the value, then any sleeping
+                        *   semaphore ops won't be able to run: if the
+                        *   previous value was too small, then the new
+                        *   value will be too small, too.
+                        */
+                       for (i = 0; i < nsops; i++) {
+                               if (sops[i].sem_op > 0) {
+                                       otime |= update_queue(sma,
+                                                       sops[i].sem_num, pt);
+                               }
                        }
                }
-               goto done_checkretry;
-       }
-
-       /* Check the semaphores that were modified. */
-       for (i = 0; i < nsops; i++) {
-               if (sops[i].sem_op > 0 ||
-                       (sops[i].sem_op < 0 &&
-                               sma->sem_base[sops[i].sem_num].semval == 0))
-                       if (update_queue(sma, sops[i].sem_num, pt)) {
-                               otime = 1;
-                               progress = 1;
-                       }
        }
-done_checkretry:
-       if (progress) {
-               progress = 0;
-               goto retry_global;
+       if (otime) {
+               if (sops == NULL) {
+                       sma->sem_base[0].sem_otime = get_seconds();
+               } else {
+                       sma->sem_base[sops[0].sem_num].sem_otime =
+                                                               get_seconds();
+               }
        }
-done:
-       if (otime)
-               sma->sem_otime = get_seconds();
 }
 
 
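
The reasoning in the new comment above — a sleeping operation is always waiting for a semaphore value to become large enough, so only increases can wake it — is easy to reproduce from user space. A minimal demonstration, assuming fork() and the IPC calls succeed (on Linux a freshly created semaphore starts at 0):

#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct sembuf dec = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
	struct sembuf inc = { .sem_num = 0, .sem_op = +1, .sem_flg = 0 };
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (fork() == 0) {
		semop(id, &dec, 1);	/* sleeps: semval is 0            */
		_exit(0);
	}
	sleep(1);			/* give the child time to block   */
	semop(id, &inc, 1);		/* the increase wakes the sleeper */
	wait(NULL);
	semctl(id, 0, IPC_RMID);
	return 0;
}
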
@@ -1021,6 +1075,21 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in,
        }
 }
 
+static time_t get_semotime(struct sem_array *sma)
+{
+       int i;
+       time_t res;
+
+       res = sma->sem_base[0].sem_otime;
+       for (i = 1; i < sma->sem_nsems; i++) {
+               time_t to = sma->sem_base[i].sem_otime;
+
+               if (to > res)
+                       res = to;
+       }
+       return res;
+}
+
 static int semctl_nolock(struct ipc_namespace *ns, int semid,
                         int cmd, int version, void __user *p)
 {
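
Because sem_otime is now kept per semaphore, get_semotime() reports the newest of the per-semaphore timestamps; that is also the value user space sees via semctl(IPC_STAT). A short example (on Linux the caller must define union semun itself):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/types.h>

union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
};

/* Prints the sem_otime reported for an existing semaphore set. */
static void print_otime(int semid)
{
	struct semid_ds ds;
	union semun arg = { .buf = &ds };

	if (semctl(semid, 0, IPC_STAT, arg) == 0)
		printf("sem_otime = %ld\n", (long)ds.sem_otime);
}
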
@@ -1094,9 +1163,9 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
                        goto out_unlock;
 
                kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
-               tbuf.sem_otime  = sma->sem_otime;
-               tbuf.sem_ctime  = sma->sem_ctime;
-               tbuf.sem_nsems  = sma->sem_nsems;
+               tbuf.sem_otime = get_semotime(sma);
+               tbuf.sem_ctime = sma->sem_ctime;
+               tbuf.sem_nsems = sma->sem_nsems;
                rcu_read_unlock();
                if (copy_semid_to_user(p, &tbuf, version))
                        return -EFAULT;
@@ -1627,7 +1696,6 @@ static int get_queue_result(struct sem_queue *q)
        return error;
 }
 
-
 SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
                unsigned, nsops, const struct timespec __user *, timeout)
 {
@@ -1725,7 +1793,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
        if (un && un->semid == -1)
                goto out_unlock_free;
 
-       error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
+       error = perform_atomic_semop(sma, sops, nsops, un,
+                                       task_tgid_vnr(current));
        if (error <= 0) {
                if (alter && error == 0)
                        do_smart_update(sma, sops, nsops, 1, &tasks);
@@ -1747,11 +1816,22 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
                struct sem *curr;
                curr = &sma->sem_base[sops->sem_num];
 
-               if (alter)
-                       list_add_tail(&queue.list, &curr->pending_alter);
-               else
+               if (alter) {
+                       if (sma->complex_count) {
+                               list_add_tail(&queue.list,
+                                               &sma->pending_alter);
+                       } else {
+                               list_add_tail(&queue.list,
+                                               &curr->pending_alter);
+                       }
+               } else {
                        list_add_tail(&queue.list, &curr->pending_const);
+               }
        } else {
+               if (!sma->complex_count)
+                       merge_queues(sma);
+
                if (alter)
                        list_add_tail(&queue.list, &sma->pending_alter);
                else
@@ -1977,6 +2057,9 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
 {
        struct user_namespace *user_ns = seq_user_ns(s);
        struct sem_array *sma = it;
+       time_t sem_otime;
+
+       sem_otime = get_semotime(sma);
 
        return seq_printf(s,
                          "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
@@ -1988,7 +2071,7 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
                          from_kgid_munged(user_ns, sma->sem_perm.gid),
                          from_kuid_munged(user_ns, sma->sem_perm.cuid),
                          from_kgid_munged(user_ns, sma->sem_perm.cgid),
-                         sma->sem_otime,
+                         sem_otime,
                          sma->sem_ctime);
 }
 #endif