Apply preempt_rt patch-4.9-rt1.patch.xz
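For arch/x86/platform/uv/uv_time.c this boils down to two changes. First, the
per-node uv_rtc_timer_head lock is converted from spinlock_t to
raw_spinlock_t: with PREEMPT_RT a spinlock_t becomes a sleeping lock, but this
lock is taken with interrupts disabled on the clockevents set_next_event path,
so it has to remain a true spinning lock. Second, uv_read_rtc() now disables
preemption so that the per-blade MMR offset computed via
uv_blade_processor_id() and the following uv_read_local_mmr() access are
guaranteed to happen on the same CPU.

A minimal sketch of the two patterns the conversion relies on (the identifiers
example_lock, example_state and example_table are made up for illustration and
are not part of this file):

  #include <linux/spinlock.h>
  #include <linux/preempt.h>
  #include <linux/smp.h>
  #include <linux/threads.h>

  static DEFINE_RAW_SPINLOCK(example_lock);   /* hypothetical */
  static int example_state;                   /* hypothetical */
  static int example_table[NR_CPUS];          /* hypothetical */

  static void example_update(int v)
  {
          unsigned long flags;

          /* raw_spinlock_t stays a spinning lock even with PREEMPT_RT,
           * so it may safely be taken in hard interrupt context. */
          raw_spin_lock_irqsave(&example_lock, flags);
          example_state = v;
          raw_spin_unlock_irqrestore(&example_lock, flags);
  }

  static int example_read(void)
  {
          int val;

          /* Keep the CPU lookup and the dependent read on the same CPU. */
          preempt_disable();
          val = example_table[smp_processor_id()];
          preempt_enable();

          return val;
  }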
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index b333fc45f9ec8025e72b2ad29abfeffbc7285e4b..8b85916e698605e493a12f39d5220ff4ba47fa12 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -57,7 +57,7 @@ static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
 
 /* There is one of these allocated per node */
 struct uv_rtc_timer_head {
-       spinlock_t      lock;
+       raw_spinlock_t  lock;
        /* next cpu waiting for timer, local node relative: */
        int             next_cpu;
        /* number of cpus on this node: */
@@ -177,7 +177,7 @@ static __init int uv_rtc_allocate_timers(void)
                                uv_rtc_deallocate_timers();
                                return -ENOMEM;
                        }
-                       spin_lock_init(&head->lock);
+                       raw_spin_lock_init(&head->lock);
                        head->ncpus = uv_blade_nr_possible_cpus(bid);
                        head->next_cpu = -1;
                        blade_info[bid] = head;
@@ -231,7 +231,7 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
        unsigned long flags;
        int next_cpu;
 
-       spin_lock_irqsave(&head->lock, flags);
+       raw_spin_lock_irqsave(&head->lock, flags);
 
        next_cpu = head->next_cpu;
        *t = expires;
@@ -243,12 +243,12 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
                if (uv_setup_intr(cpu, expires)) {
                        *t = ULLONG_MAX;
                        uv_rtc_find_next_timer(head, pnode);
-                       spin_unlock_irqrestore(&head->lock, flags);
+                       raw_spin_unlock_irqrestore(&head->lock, flags);
                        return -ETIME;
                }
        }
 
-       spin_unlock_irqrestore(&head->lock, flags);
+       raw_spin_unlock_irqrestore(&head->lock, flags);
        return 0;
 }
 
@@ -267,7 +267,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
        unsigned long flags;
        int rc = 0;
 
-       spin_lock_irqsave(&head->lock, flags);
+       raw_spin_lock_irqsave(&head->lock, flags);
 
        if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
                rc = 1;
@@ -279,7 +279,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
                        uv_rtc_find_next_timer(head, pnode);
        }
 
-       spin_unlock_irqrestore(&head->lock, flags);
+       raw_spin_unlock_irqrestore(&head->lock, flags);
 
        return rc;
 }
@@ -299,13 +299,18 @@ static int uv_rtc_unset_timer(int cpu, int force)
 static cycle_t uv_read_rtc(struct clocksource *cs)
 {
        unsigned long offset;
+       cycle_t cycles;
 
+       preempt_disable();
        if (uv_get_min_hub_revision_id() == 1)
                offset = 0;
        else
                offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
 
-       return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
+       cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
+       preempt_enable();
+
+       return cycles;
 }
 
 /*