rtime.felk.cvut.cz Git - zynq/linux.git/blobdiff - arch/x86/platform/uv/tlb_uv.c
Merge branch '4.0.8-rt6'
[zynq/linux.git] / arch / x86 / platform / uv / tlb_uv.c
index 994798548b1ad57288e4b51c01422d3dad70ff7f..11b78b7c32b47e4d347ddfefe8bf5372a4b57b0f 100644 (file)
@@ -714,9 +714,9 @@ static void destination_plugged(struct bau_desc *bau_desc,
 
                quiesce_local_uvhub(hmaster);
 
-               spin_lock(&hmaster->queue_lock);
+               raw_spin_lock(&hmaster->queue_lock);
                reset_with_ipi(&bau_desc->distribution, bcp);
-               spin_unlock(&hmaster->queue_lock);
+               raw_spin_unlock(&hmaster->queue_lock);
 
                end_uvhub_quiesce(hmaster);
 
@@ -736,9 +736,9 @@ static void destination_timeout(struct bau_desc *bau_desc,
 
                quiesce_local_uvhub(hmaster);
 
-               spin_lock(&hmaster->queue_lock);
+               raw_spin_lock(&hmaster->queue_lock);
                reset_with_ipi(&bau_desc->distribution, bcp);
-               spin_unlock(&hmaster->queue_lock);
+               raw_spin_unlock(&hmaster->queue_lock);
 
                end_uvhub_quiesce(hmaster);
 
@@ -759,7 +759,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
        cycles_t tm1;
 
        hmaster = bcp->uvhub_master;
-       spin_lock(&hmaster->disable_lock);
+       raw_spin_lock(&hmaster->disable_lock);
        if (!bcp->baudisabled) {
                stat->s_bau_disabled++;
                tm1 = get_cycles();
@@ -772,7 +772,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
                        }
                }
        }
-       spin_unlock(&hmaster->disable_lock);
+       raw_spin_unlock(&hmaster->disable_lock);
 }
 
 static void count_max_concurr(int stat, struct bau_control *bcp,
@@ -835,7 +835,7 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
  */
 static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
 {
-       spinlock_t *lock = &hmaster->uvhub_lock;
+       raw_spinlock_t *lock = &hmaster->uvhub_lock;
        atomic_t *v;
 
        v = &hmaster->active_descriptor_count;
@@ -968,7 +968,7 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
        struct bau_control *hmaster;
 
        hmaster = bcp->uvhub_master;
-       spin_lock(&hmaster->disable_lock);
+       raw_spin_lock(&hmaster->disable_lock);
        if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
                stat->s_bau_reenabled++;
                for_each_present_cpu(tcpu) {
@@ -980,10 +980,10 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
                                tbcp->period_giveups = 0;
                        }
                }
-               spin_unlock(&hmaster->disable_lock);
+               raw_spin_unlock(&hmaster->disable_lock);
                return 0;
        }
-       spin_unlock(&hmaster->disable_lock);
+       raw_spin_unlock(&hmaster->disable_lock);
        return -1;
 }
 
@@ -1901,9 +1901,9 @@ static void __init init_per_cpu_tunables(void)
                bcp->cong_reps                  = congested_reps;
                bcp->disabled_period =          sec_2_cycles(disabled_period);
                bcp->giveup_limit =             giveup_limit;
-               spin_lock_init(&bcp->queue_lock);
-               spin_lock_init(&bcp->uvhub_lock);
-               spin_lock_init(&bcp->disable_lock);
+               raw_spin_lock_init(&bcp->queue_lock);
+               raw_spin_lock_init(&bcp->uvhub_lock);
+               raw_spin_lock_init(&bcp->disable_lock);
        }
 }