* [ANNOUNCE] v5.2.9-rt3
@ 2019-08-16 15:36 Sebastian Andrzej Siewior
  2019-08-19 11:03 ` Alexander Dahl
  0 siblings, 1 reply; 9+ messages in thread
From: Sebastian Andrzej Siewior @ 2019-08-16 15:36 UTC (permalink / raw)
  To: Thomas Gleixner; +Cc: LKML, linux-rt-users, Steven Rostedt

Dear RT folks!

I'm pleased to announce the v5.2.9-rt3 patch set. 

Changes since v5.2.9-rt2:

  - The exynos5 i2c controller disabled IRQ threading by requesting its
    interrupt with IRQF_ONESHOT, as reported by Benjamin Rouxel; the
    hix5hd2 i2c controller did the same. Both drivers now drop the flag
    so their handlers can be threaded again.

  - A timer used by the deadline scheduler now fires in hard-irq
    context. Patch by Juri Lelli.

  - A lock used in x86's thermal-exception handling now uses a
    raw_spinlock_t. Patch by Clark Williams.

  - The DMA-reservation code now uses a sequence lock instead of a
    sequence counter. Yann Collette reported warnings from that area
    with an AMD GPU.

  - Two KVM-related timers on arm64 now expire in hard-irq context.
    Reported by Julien Grall, patched by Thomas Gleixner.

  - Lazy preemption was broken in one case on arm64, as reported by
    Paul Thomas. While investigating, another lazy-preempt bug was
    fixed on arm64 and x86.
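
For reference, two of the changes above boil down to generic -RT
patterns worth spelling out. The DMA-reservation conversion replaces a
seqcount_t, whose write side needed preempt_disable() on -RT, with a
seqlock_t, whose internal lock already serializes the writers. A
minimal sketch of the pattern (the demo_* names are invented for
illustration, the real conversion is in the reservation.c hunks below):

#include <linux/seqlock.h>
#include <linux/rcupdate.h>

struct demo_obj {                       /* stand-in for reservation_object */
        seqlock_t seq;                  /* was seqcount_t; init via seqlock_init() */
        void __rcu *ptr;
};

static void demo_update(struct demo_obj *obj, void *new)
{
        /* was: preempt_disable(); write_seqcount_begin(&obj->seq); */
        write_seqlock(&obj->seq);
        rcu_assign_pointer(obj->ptr, new);
        write_sequnlock(&obj->seq);
        /* was: write_seqcount_end(&obj->seq); preempt_enable(); */
}

static void *demo_read(struct demo_obj *obj)
{
        unsigned int seq;
        void *p;

        do {                                    /* caller holds rcu_read_lock() */
                seq = read_seqbegin(&obj->seq);  /* was: read_seqcount_begin() */
                p = rcu_dereference(obj->ptr);
        } while (read_seqretry(&obj->seq, seq)); /* was: read_seqcount_retry() */

        return p;
}

The timer changes are the same idea applied to hrtimers: on -RT most
hrtimers are expired from softirq context, and timers whose expiry must
not be delayed that way are initialized and armed with the *_HARD
modes, which keep them in hard-irq context. Again only a sketch with
invented demo_* names; the real hunks are in deadline.c and
arch_timer.c below:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart demo_expire(struct hrtimer *t)
{
        /* runs in hard-irq context: no sleeping locks in here */
        return HRTIMER_NORESTART;
}

static void demo_arm(struct hrtimer *t)
{
        /* was: HRTIMER_MODE_REL, i.e. softirq expiry on -RT */
        hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        t->function = demo_expire;
        hrtimer_start(t, ms_to_ktime(10), HRTIMER_MODE_REL_HARD);
}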

Known issues
     - rcutorture is currently broken on -RT. Reported by Juri Lelli.

The delta patch against v5.2.9-rt2 is appended below and can be found here:
 
     https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.2/incr/patch-5.2.9-rt2-rt3.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.2.9-rt3

The RT patch against v5.2.9 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patch-5.2.9-rt3.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2.9-rt3.tar.xz

Sebastian

diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index 3bfad251203b5..ca1c6fe8dd347 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -73,6 +73,8 @@ static inline bool __preempt_count_dec_and_test(void)
 	if (!pc || !READ_ONCE(ti->preempt_count))
 		return true;
 #ifdef CONFIG_PREEMPT_LAZY
+	if ((pc & ~PREEMPT_NEED_RESCHED))
+		return false;
 	if (current_thread_info()->preempt_lazy_count)
 		return false;
 	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index aa16cb43a779e..5d651c560bba6 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -680,7 +680,8 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
 	orr	x24, x24, x0
 alternative_else_nop_endif
 
-	cbnz	x24, 2f					// preempt count != 0
+	cbz	x24, 1f					// (need_resched + count) == 0
+	cbnz	w24, 2f					// count != 0
 
 	ldr	w24, [tsk, #TSK_TI_PREEMPT_LAZY]	// get preempt lazy count
 	cbnz	w24, 2f					// preempt lazy count != 0
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index f8e42abd874a4..9496299d23fc3 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -99,6 +99,8 @@ static __always_inline bool __preempt_count_dec_and_test(void)
 	if (____preempt_count_dec_and_test())
 		return true;
 #ifdef CONFIG_PREEMPT_LAZY
+	if (preempt_count())
+		return false;
 	if (current_thread_info()->preempt_lazy_count)
 		return false;
 	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index a6fee5a6e9fb2..27fffd65abe6b 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -168,7 +168,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 		return 0;
 
 retry:
-	seq = read_seqcount_begin(&resv->seq);
+	seq = read_seqbegin(&resv->seq);
 	rcu_read_lock();
 
 	fobj = rcu_dereference(resv->fence);
@@ -177,7 +177,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 	else
 		shared_count = 0;
 	fence_excl = rcu_dereference(resv->fence_excl);
-	if (read_seqcount_retry(&resv->seq, seq)) {
+	if (read_seqretry(&resv->seq, seq)) {
 		rcu_read_unlock();
 		goto retry;
 	}
@@ -1034,12 +1034,12 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 
 		robj = buf_obj->resv;
 		while (true) {
-			seq = read_seqcount_begin(&robj->seq);
+			seq = read_seqbegin(&robj->seq);
 			rcu_read_lock();
 			fobj = rcu_dereference(robj->fence);
 			shared_count = fobj ? fobj->shared_count : 0;
 			fence = rcu_dereference(robj->fence_excl);
-			if (!read_seqcount_retry(&robj->seq, seq))
+			if (!read_seqretry(&robj->seq, seq))
 				break;
 			rcu_read_unlock();
 		}
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 4447e13d1e891..030c45ad3e56a 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -110,15 +110,13 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
 	new->shared_count = j;
 	new->shared_max = max;
 
-	preempt_disable();
-	write_seqcount_begin(&obj->seq);
+	write_seqlock(&obj->seq);
 	/*
 	 * RCU_INIT_POINTER can be used here,
 	 * seqcount provides the necessary barriers
 	 */
 	RCU_INIT_POINTER(obj->fence, new);
-	write_seqcount_end(&obj->seq);
-	preempt_enable();
+	write_sequnlock(&obj->seq);
 
 	if (!old)
 		return 0;
@@ -158,8 +156,7 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
 	fobj = reservation_object_get_list(obj);
 	count = fobj->shared_count;
 
-	preempt_disable();
-	write_seqcount_begin(&obj->seq);
+	write_seqlock(&obj->seq);
 
 	for (i = 0; i < count; ++i) {
 		struct dma_fence *old_fence;
@@ -181,8 +178,7 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
 	/* pointer update must be visible before we extend the shared_count */
 	smp_store_mb(fobj->shared_count, count);
 
-	write_seqcount_end(&obj->seq);
-	preempt_enable();
+	write_sequnlock(&obj->seq);
 }
 EXPORT_SYMBOL(reservation_object_add_shared_fence);
 
@@ -209,14 +205,11 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
 	if (fence)
 		dma_fence_get(fence);
 
-	preempt_disable();
-	write_seqcount_begin(&obj->seq);
-	/* write_seqcount_begin provides the necessary memory barrier */
+	write_seqlock(&obj->seq);
 	RCU_INIT_POINTER(obj->fence_excl, fence);
 	if (old)
 		old->shared_count = 0;
-	write_seqcount_end(&obj->seq);
-	preempt_enable();
+	write_sequnlock(&obj->seq);
 
 	/* inplace update, no shared fences */
 	while (i--)
@@ -298,13 +291,10 @@ int reservation_object_copy_fences(struct reservation_object *dst,
 	src_list = reservation_object_get_list(dst);
 	old = reservation_object_get_excl(dst);
 
-	preempt_disable();
-	write_seqcount_begin(&dst->seq);
-	/* write_seqcount_begin provides the necessary memory barrier */
+	write_seqlock(&dst->seq);
 	RCU_INIT_POINTER(dst->fence_excl, new);
 	RCU_INIT_POINTER(dst->fence, dst_list);
-	write_seqcount_end(&dst->seq);
-	preempt_enable();
+	write_sequnlock(&dst->seq);
 
 	if (src_list)
 		kfree_rcu(src_list, rcu);
@@ -345,7 +335,7 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 		shared_count = i = 0;
 
 		rcu_read_lock();
-		seq = read_seqcount_begin(&obj->seq);
+		seq = read_seqbegin(&obj->seq);
 
 		fence_excl = rcu_dereference(obj->fence_excl);
 		if (fence_excl && !dma_fence_get_rcu(fence_excl))
@@ -394,7 +384,7 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 			}
 		}
 
-		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
+		if (i != shared_count || read_seqretry(&obj->seq, seq)) {
 			while (i--)
 				dma_fence_put(shared[i]);
 			dma_fence_put(fence_excl);
@@ -443,7 +433,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 
 retry:
 	shared_count = 0;
-	seq = read_seqcount_begin(&obj->seq);
+	seq = read_seqbegin(&obj->seq);
 	rcu_read_lock();
 	i = -1;
 
@@ -490,7 +480,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 
 	rcu_read_unlock();
 	if (fence) {
-		if (read_seqcount_retry(&obj->seq, seq)) {
+		if (read_seqretry(&obj->seq, seq)) {
 			dma_fence_put(fence);
 			goto retry;
 		}
@@ -546,7 +536,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 retry:
 	ret = true;
 	shared_count = 0;
-	seq = read_seqcount_begin(&obj->seq);
+	seq = read_seqbegin(&obj->seq);
 
 	if (test_all) {
 		unsigned i;
@@ -567,7 +557,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 				break;
 		}
 
-		if (read_seqcount_retry(&obj->seq, seq))
+		if (read_seqretry(&obj->seq, seq))
 			goto retry;
 	}
 
@@ -580,7 +570,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 			if (ret < 0)
 				goto retry;
 
-			if (read_seqcount_retry(&obj->seq, seq))
+			if (read_seqretry(&obj->seq, seq))
 				goto retry;
 		}
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 4b192e0ce92f4..be625817e5d95 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -250,11 +250,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 	new->shared_count = k;
 
 	/* Install the new fence list, seqcount provides the barriers */
-	preempt_disable();
-	write_seqcount_begin(&resv->seq);
+	write_seqlock(&resv->seq);
 	RCU_INIT_POINTER(resv->fence, new);
-	write_seqcount_end(&resv->seq);
-	preempt_enable();
+	write_sequnlock(&resv->seq);
 
 	/* Drop the references to the removed fences or move them to ef_list */
 	for (i = j, k = 0; i < old->shared_count; ++i) {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ad01c92aaf748..2910a133077a3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -449,7 +449,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 				 unsigned int flags,
 				 long timeout)
 {
-	unsigned int seq = __read_seqcount_begin(&resv->seq);
+	unsigned int seq = read_seqbegin(&resv->seq);
 	struct dma_fence *excl;
 	bool prune_fences = false;
 
@@ -500,9 +500,9 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 	 * signaled and that the reservation object has not been changed (i.e.
 	 * no new fences have been added).
 	 */
-	if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
+	if (prune_fences && !read_seqretry(&resv->seq, seq)) {
 		if (reservation_object_trylock(resv)) {
-			if (!__read_seqcount_retry(&resv->seq, seq))
+			if (!read_seqretry(&resv->seq, seq))
 				reservation_object_add_excl_fence(resv, NULL);
 			reservation_object_unlock(resv);
 		}
@@ -3943,7 +3943,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 *
 	 */
 retry:
-	seq = raw_read_seqcount(&obj->resv->seq);
+	seq = read_seqbegin(&obj->resv->seq);
 
 	/* Translate the exclusive fence to the READ *and* WRITE engine */
 	args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
@@ -3961,7 +3961,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		}
 	}
 
-	if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
+	if (args->busy && read_seqretry(&obj->resv->seq, seq))
 		goto retry;
 
 	err = 0;
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index e4e7932f78000..e7514c16b756c 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -791,9 +791,7 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
 	}
 
 	ret = devm_request_irq(&pdev->dev, i2c->irq, exynos5_i2c_irq,
-				IRQF_NO_SUSPEND | IRQF_ONESHOT,
-				dev_name(&pdev->dev), i2c);
-
+			       IRQF_NO_SUSPEND, dev_name(&pdev->dev), i2c);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", i2c->irq);
 		goto err_clk;
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 4df1434b3597d..8497c7a95dd44 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -445,8 +445,7 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev)
 	hix5hd2_i2c_init(priv);
 
 	ret = devm_request_irq(&pdev->dev, irq, hix5hd2_i2c_irq,
-			       IRQF_NO_SUSPEND | IRQF_ONESHOT,
-			       dev_name(&pdev->dev), priv);
+			       IRQF_NO_SUSPEND, dev_name(&pdev->dev), priv);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", irq);
 		goto err_clk;
diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
index 319b771261686..92ceed3de6f39 100644
--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -63,7 +63,7 @@ static int max_packages __read_mostly;
 /* Array of package pointers */
 static struct pkg_device **packages;
 /* Serializes interrupt notification, work and hotplug */
-static DEFINE_SPINLOCK(pkg_temp_lock);
+static DEFINE_RAW_SPINLOCK(pkg_temp_lock);
 /* Protects zone operation in the work function against hotplug removal */
 static DEFINE_MUTEX(thermal_zone_mutex);
 
@@ -279,12 +279,12 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
 	u64 msr_val, wr_val;
 
 	mutex_lock(&thermal_zone_mutex);
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 	++pkg_work_cnt;
 
 	pkgdev = pkg_temp_thermal_get_dev(cpu);
 	if (!pkgdev) {
-		spin_unlock_irq(&pkg_temp_lock);
+		raw_spin_unlock_irq(&pkg_temp_lock);
 		mutex_unlock(&thermal_zone_mutex);
 		return;
 	}
@@ -298,7 +298,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
 	}
 
 	enable_pkg_thres_interrupt();
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 
 	/*
 	 * If tzone is not NULL, then thermal_zone_mutex will prevent the
@@ -323,7 +323,7 @@ static int pkg_thermal_notify(u64 msr_val)
 	struct pkg_device *pkgdev;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pkg_temp_lock, flags);
+	raw_spin_lock_irqsave(&pkg_temp_lock, flags);
 	++pkg_interrupt_cnt;
 
 	disable_pkg_thres_interrupt();
@@ -335,7 +335,7 @@ static int pkg_thermal_notify(u64 msr_val)
 		pkg_thermal_schedule_work(pkgdev->cpu, &pkgdev->work);
 	}
 
-	spin_unlock_irqrestore(&pkg_temp_lock, flags);
+	raw_spin_unlock_irqrestore(&pkg_temp_lock, flags);
 	return 0;
 }
 
@@ -381,9 +381,9 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
 	      pkgdev->msr_pkg_therm_high);
 
 	cpumask_set_cpu(cpu, &pkgdev->cpumask);
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 	packages[pkgid] = pkgdev;
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 	return 0;
 }
 
@@ -420,7 +420,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
 	}
 
 	/* Protect against work and interrupts */
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 
 	/*
 	 * Check whether this cpu was the current target and store the new
@@ -452,9 +452,9 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
 		 * To cancel the work we need to drop the lock, otherwise
 		 * we might deadlock if the work needs to be flushed.
 		 */
-		spin_unlock_irq(&pkg_temp_lock);
+		raw_spin_unlock_irq(&pkg_temp_lock);
 		cancel_delayed_work_sync(&pkgdev->work);
-		spin_lock_irq(&pkg_temp_lock);
+		raw_spin_lock_irq(&pkg_temp_lock);
 		/*
 		 * If this is not the last cpu in the package and the work
 		 * did not run after we dropped the lock above, then we
@@ -465,7 +465,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
 			pkg_thermal_schedule_work(target, &pkgdev->work);
 	}
 
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 
 	/* Final cleanup if this is the last cpu */
 	if (lastcpu)
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index ee750765cc941..11cc05f489365 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -71,7 +71,7 @@ struct reservation_object_list {
  */
 struct reservation_object {
 	struct ww_mutex lock;
-	seqcount_t seq;
+	seqlock_t seq;
 
 	struct dma_fence __rcu *fence_excl;
 	struct reservation_object_list __rcu *fence;
@@ -90,7 +90,7 @@ reservation_object_init(struct reservation_object *obj)
 {
 	ww_mutex_init(&obj->lock, &reservation_ww_class);
 
-	__seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class);
+	seqlock_init(&obj->seq);
 	RCU_INIT_POINTER(obj->fence, NULL);
 	RCU_INIT_POINTER(obj->fence_excl, NULL);
 }
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index c18be51f76088..1758f2a2d775a 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -287,7 +287,7 @@ static void task_non_contending(struct task_struct *p)
 
 	dl_se->dl_non_contending = 1;
 	get_task_struct(p);
-	hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL);
+	hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
 }
 
 static void task_contending(struct sched_dl_entity *dl_se, int flags)
@@ -1292,7 +1292,7 @@ void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
 {
 	struct hrtimer *timer = &dl_se->inactive_timer;
 
-	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
 	timer->function = inactive_task_timer;
 }
 
diff --git a/localversion-rt b/localversion-rt
index c3054d08a1129..1445cd65885cd 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt2
+-rt3
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 1be486d5d7cb4..0bfa7c5b5c890 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -80,7 +80,7 @@ static inline bool userspace_irqchip(struct kvm *kvm)
 static void soft_timer_start(struct hrtimer *hrt, u64 ns)
 {
 	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
-		      HRTIMER_MODE_ABS);
+		      HRTIMER_MODE_ABS_HARD);
 }
 
 static void soft_timer_cancel(struct hrtimer *hrt)
@@ -697,11 +697,11 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 	update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
 	ptimer->cntvoff = 0;
 
-	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
 	timer->bg_timer.function = kvm_bg_timer_expire;
 
-	hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-	hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
+	hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
 	vtimer->hrtimer.function = kvm_hrtimer_expire;
 	ptimer->hrtimer.function = kvm_hrtimer_expire;
 


* Re: [ANNOUNCE] v5.2.9-rt3
  2019-08-16 15:36 [ANNOUNCE] v5.2.9-rt3 Sebastian Andrzej Siewior
@ 2019-08-19 11:03 ` Alexander Dahl
  2019-08-20 15:44   ` Alexandre Belloni
  0 siblings, 1 reply; 9+ messages in thread
From: Alexander Dahl @ 2019-08-19 11:03 UTC (permalink / raw)
  To: linux-rt-users
  Cc: Sebastian Andrzej Siewior, Thomas Gleixner, LKML, Steven Rostedt,
	Alexandre Belloni

Hei hei,

just tried to compile v5.2.9-rt3 for the SAMA5D27-SOM1-EK1, based on
arch/arm/configs/sama5_defconfig, running oldconfig and selecting the
defaults, but the build fails if CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
is not set.

I think this is due to the Atmel TCLIB changes in v5.2 and the not yet
adapted RT patch "clocksource: TCLIB: Allow higher clock rates for clock
events", right?

What's the recommended setting of this option for RT?

See compiler output below.

Greets
Alex

----------------------
target: kernel.compile
----------------------

make[1]: Entering directory '/mnt/data/adahl/src/DistroKit/platform-v7a/build-
target/linux-5.2.9'
  CALL    scripts/atomic/check-atomics.sh
  CALL    scripts/checksyscalls.sh
  GEN     usr/initramfs_data.cpio
  CHK     include/generated/compile.h
  AS      usr/initramfs_data.o
  AR      usr/built-in.a
  CC      drivers/clocksource/timer-atmel-tcb.o
  AR      drivers/crypto/hisilicon/built-in.a
  CC      drivers/crypto/atmel-aes.o
drivers/clocksource/timer-atmel-tcb.c: In function 'tcb_clksrc_init':
drivers/clocksource/timer-atmel-tcb.c:485:24: error: incompatible type for 
argument 1 of 'setup_clkevents'
drivers/clocksource/timer-atmel-tcb.c:268:19: note: expected 'struct atmel_tc 
*' but argument is of type 'struct atmel_tc'
scripts/Makefile.build:278: recipe for target 'drivers/clocksource/timer-
atmel-tcb.o' failed
make[3]: *** [drivers/clocksource/timer-atmel-tcb.o] Error 1
make[2]: *** [drivers/clocksource] Error 2
make[2]: *** Waiting for unfinished jobs....
scripts/Makefile.build:489: recipe for target 'drivers/clocksource' failed
  CC      drivers/crypto/atmel-sha.o
  CC      drivers/crypto/atmel-tdes.o
  AR      drivers/crypto/built-in.a
Makefile:1073: recipe for target 'drivers' failed
make[1]: *** [drivers] Error 2
make[1]: Leaving directory '/mnt/data/adahl/src/DistroKit/platform-v7a/build-
target/linux-5.2.9'
/usr/local/lib/ptxdist-2019.01.0/rules/kernel.make:174: recipe for target '/
home/adahl/src/DistroKit/platform-v7a/state/kernel.compile' failed
make: *** [/home/adahl/src/DistroKit/platform-v7a/state/kernel.compile] Error 
2


On Friday, 16 August 2019, at 17:36:16 CEST, Sebastian Andrzej Siewior wrote:
> Dear RT folks!
> 
> I'm pleased to announce the v5.2.9-rt3 patch set.
> 
> Changes since v5.2.9-rt2:
> 
>   - The exynos5 i2c controller disabled IRQ threading as reported by
>     Benjamin Rouxel. The hix5hd2 i2c controller did the same.
> 
>   - A timer related to the deadline scheduler now fires in hard-irq
>     context. Patch by Juri Lelli.
> 
>   - A lock used the x86's thermal exception uses a raw_spinlock_t. Patch
>     by Clark Williams.
> 
>   - The DMA-reservation code is using now a sequence lock instead a
>     sequence counter. Yann Collette reported warnings from that area
>     with an AMD GPU.
> 
>   - Two kvm related timer on arm64 expire now hard-irq context. Reported
>     by Julien Grall, patched by Thomas Gleixner.
> 
>   - Lazy preemption was broken in a case on arm64, reported by Paul
>     Thomas. While investigating another lazy-preempt bug was fixed on
>     arm64 and x86.
> 
> Known issues
>      - rcutorture is currently broken on -RT. Reported by Juri Lelli.
> 
> The delta patch against v5.2.9-rt2 is appended below and can be found here:
> 
>     
> https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.2/incr/patch-5.2.9-rt
> 2-rt3.patch.xz
> 
> You can get this release via the git tree at:
> 
>     git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
> v5.2.9-rt3
> 
> The RT patch against v5.2.9 can be found here:
> 
>    
> https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patch-5.2.9-r
> t3.patch.xz
> 
> The split quilt queue is available at:
> 
>    
> https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2.9
> -rt3.tar.xz
> 
> Sebastian


* Re: [ANNOUNCE] v5.2.9-rt3
  2019-08-19 11:03 ` Alexander Dahl
@ 2019-08-20 15:44   ` Alexandre Belloni
  2019-08-21 13:25     ` Sebastian Andrzej Siewior
  0 siblings, 1 reply; 9+ messages in thread
From: Alexandre Belloni @ 2019-08-20 15:44 UTC (permalink / raw)
  To: Alexander Dahl
  Cc: linux-rt-users, Sebastian Andrzej Siewior, Thomas Gleixner, LKML,
	Steven Rostedt

Hi,

On 19/08/2019 13:03:51+0200, Alexander Dahl wrote:
> Hei hei,
> 
> just tried to compile this v5.2.9-rt3 for SAMA5D27-SOM1-EK1 based on 
> arch/arm/configs/sama5_defconfig and with running oldconfig and selecting 
> defaults, but that fails if CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK is not set. 
> 
> I think this is due to changes for Atmel TCLIB in v5.2 and the not yet adapted 
> RT patch "clocksource: TCLIB: Allow higher clock rates for clock events", 
> right?

Patch clocksource-tclib-allow-higher-clockrates.patch needs to be
changed so:

ret = setup_clkevents(tc, best_divisor_idx);

becomes

ret = setup_clkevents(&tc, best_divisor_idx);


Also, I would think clocksource-tclib-add-proper-depend.patch could be
dropped. Instead, setup_clkevents should use atmel_tcb_divisors. It
would then be necessary to move its declaration before the function.

Sebastian, can you take care of that or do you expect a patch? In the
latter case, do you want a patch for the patch?

> 
> What's the recommended setting of this option for RT?
> 

Using the slow clock will make the platform wake up less frequently;
a higher clock rate will give a better clockevent resolution.
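
As a rough, illustrative calculation (the 132 MHz master clock and the
divide-by-32 prescaler below are just example numbers; the real rates
depend on the SoC and the selected divisor):

    32.768 kHz slow clock:  1 tick = 1/32768 s          ~ 30.5 us
                            16-bit range = 65536 ticks  ~ 2 s
    132 MHz MCK / 32:       1 tick = 32/132000000 s     ~ 0.24 us
                            16-bit range = 65536 ticks  ~ 15.9 ms

So the fast clock gives roughly two orders of magnitude better
resolution, at the price of the 16-bit counter wrapping, and the
clockevent being reprogrammed, much more often.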

-- 
Alexandre Belloni, Bootlin
Embedded Linux and Kernel engineering
https://bootlin.com


* Re: [ANNOUNCE] v5.2.9-rt3
  2019-08-20 15:44   ` Alexandre Belloni
@ 2019-08-21 13:25     ` Sebastian Andrzej Siewior
  2019-08-21 14:21       ` Alexandre Belloni
  0 siblings, 1 reply; 9+ messages in thread
From: Sebastian Andrzej Siewior @ 2019-08-21 13:25 UTC (permalink / raw)
  To: Alexandre Belloni
  Cc: Alexander Dahl, linux-rt-users, Thomas Gleixner, LKML, Steven Rostedt

On 2019-08-20 17:44:18 [+0200], Alexandre Belloni wrote:
> Hi,
Hi,

> On 19/08/2019 13:03:51+0200, Alexander Dahl wrote:
> > Hei hei,
> > 
> > just tried to compile this v5.2.9-rt3 for SAMA5D27-SOM1-EK1 based on 
> > arch/arm/configs/sama5_defconfig and with running oldconfig and selecting 
> > defaults, but that fails if CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK is not set. 
> > 
> > I think this is due to changes for Atmel TCLIB in v5.2 and the not yet adapted 
> > RT patch "clocksource: TCLIB: Allow higher clock rates for clock events", 
> > right?
> 
> Patch clocksource-tclib-allow-higher-clockrates.patch needs to be
> changed so:
> 
> ret = setup_clkevents(tc, best_divisor_idx);
> 
> becomes
> 
> ret = setup_clkevents(&tc, best_divisor_idx);
> 

I will fix that locally.

> Also, I would think clocksource-tclib-add-proper-depend.patch could be
> dropped. Instead, setup_clkevents should use atmel_tcb_divisors. It
> would then be necessary to move its declaration before the function.
> 
> Sebastian, can you take care of that or do you expect a patch? In the
> latter case, do you want a patch for the patch?

For the second part I would appreciate a patch. I can then drop
clocksource-tclib-add-proper-depend.patch if it is not an issue.

Sebastian


* Re: [ANNOUNCE] v5.2.9-rt3
  2019-08-21 13:25     ` Sebastian Andrzej Siewior
@ 2019-08-21 14:21       ` Alexandre Belloni
  2019-08-21 14:42         ` Sebastian Andrzej Siewior
  0 siblings, 1 reply; 9+ messages in thread
From: Alexandre Belloni @ 2019-08-21 14:21 UTC (permalink / raw)
  To: Sebastian Andrzej Siewior
  Cc: Alexander Dahl, linux-rt-users, Thomas Gleixner, LKML, Steven Rostedt

On 21/08/2019 15:25:54+0200, Sebastian Andrzej Siewior wrote:
> On 2019-08-20 17:44:18 [+0200], Alexandre Belloni wrote:
> > Hi,
> Hi,
> 
> > On 19/08/2019 13:03:51+0200, Alexander Dahl wrote:
> > > Hei hei,
> > > 
> > > just tried to compile this v5.2.9-rt3 for SAMA5D27-SOM1-EK1 based on 
> > > arch/arm/configs/sama5_defconfig and with running oldconfig and selecting 
> > > defaults, but that fails if CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK is not set. 
> > > 
> > > I think this is due to changes for Atmel TCLIB in v5.2 and the not yet adapted 
> > > RT patch "clocksource: TCLIB: Allow higher clock rates for clock events", 
> > > right?
> > 
> > Patch clocksource-tclib-allow-higher-clockrates.patch needs to be
> > changed so:
> > 
> > ret = setup_clkevents(tc, best_divisor_idx);
> > 
> > becomes
> > 
> > ret = setup_clkevents(&tc, best_divisor_idx);
> > 
> 
> I will fix that locally.
> 
> > Also, I would think clocksource-tclib-add-proper-depend.patch could be
> > dropped. Instead, setup_clkevents should use atmel_tcb_divisors. It
> > would then be necessary to move its declaration before the function.
> > 
> > Sebastian, can you take care of that or do you expect a patch? In the
> > latter case, do you want a patch for the patch?
> 
> For the second part I would appreciate a patch. I can then drop
> clocksource-tclib-add-proper-depend.patch if it is not an issue.
> 

I'm not sure it is worth it as the issue is introduced by
clocksource-tclib-allow-higher-clockrates.patch. Shouldn't we fix it
directly?

-- 
Alexandre Belloni, Bootlin
Embedded Linux and Kernel engineering
https://bootlin.com


* Re: [ANNOUNCE] v5.2.9-rt3
  2019-08-21 14:21       ` Alexandre Belloni
@ 2019-08-21 14:42         ` Sebastian Andrzej Siewior
  2019-08-21 14:58           ` Alexandre Belloni
  0 siblings, 1 reply; 9+ messages in thread
From: Sebastian Andrzej Siewior @ 2019-08-21 14:42 UTC (permalink / raw)
  To: Alexandre Belloni
  Cc: Alexander Dahl, linux-rt-users, Thomas Gleixner, LKML, Steven Rostedt

On 2019-08-21 16:21:10 [+0200], Alexandre Belloni wrote:
> I'm not sure it is worth it as the issue is introduced by
> clocksource-tclib-allow-higher-clockrates.patch. Shouldn't we fix it
> directly?

you want to get rid of CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK and use
the highest possible frequency by default? 

Sebastian


* Re: [ANNOUNCE] v5.2.9-rt3
  2019-08-21 14:42         ` Sebastian Andrzej Siewior
@ 2019-08-21 14:58           ` Alexandre Belloni
  2019-08-21 15:15             ` Sebastian Andrzej Siewior
  0 siblings, 1 reply; 9+ messages in thread
From: Alexandre Belloni @ 2019-08-21 14:58 UTC (permalink / raw)
  To: Sebastian Andrzej Siewior
  Cc: Alexander Dahl, linux-rt-users, Thomas Gleixner, LKML, Steven Rostedt

On 21/08/2019 16:42:30+0200, Sebastian Andrzej Siewior wrote:
> On 2019-08-21 16:21:10 [+0200], Alexandre Belloni wrote:
> > I'm not sure it is worth it as the issue is introduced by
> > clocksource-tclib-allow-higher-clockrates.patch. Shouldn't we fix it
> > directly?
> 
> you want to get rid of CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK and use
> the highest possible frequency by default? 
> 

No, I meant that the issue fixed by clocksource-tclib-add-proper-depend.patch
is introduced by clocksource-tclib-allow-higher-clockrates.patch, so I
would think fixing clocksource-tclib-allow-higher-clockrates.patch
directly is preferable to carrying a separate patch.

But maybe you meant that you wanted a patch to fix
clocksource-tclib-allow-higher-clockrates.patch.

Hopefully, one day we will have a solution for that upstream (i.e. being
able to configure the clocksource and clockevent resolutions).

-- 
Alexandre Belloni, Bootlin
Embedded Linux and Kernel engineering
https://bootlin.com


* Re: [ANNOUNCE] v5.2.9-rt3
  2019-08-21 14:58           ` Alexandre Belloni
@ 2019-08-21 15:15             ` Sebastian Andrzej Siewior
  2019-08-21 16:19               ` Alexandre Belloni
  0 siblings, 1 reply; 9+ messages in thread
From: Sebastian Andrzej Siewior @ 2019-08-21 15:15 UTC (permalink / raw)
  To: Alexandre Belloni
  Cc: Alexander Dahl, linux-rt-users, Thomas Gleixner, LKML, Steven Rostedt

On 2019-08-21 16:58:37 [+0200], Alexandre Belloni wrote:
> On 21/08/2019 16:42:30+0200, Sebastian Andrzej Siewior wrote:
> > On 2019-08-21 16:21:10 [+0200], Alexandre Belloni wrote:
> > > I'm not sure it is worth it as the issue is introduced by
> > > clocksource-tclib-allow-higher-clockrates.patch. Shouldn't we fix it
> > > directly?
> > 
> > you want to get rid of CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK and use
> > the highest possible frequency by default? 
> > 
> 
> No, I meant the issue fixed by clocksource-tclib-add-proper-depend.patch
> is introduced by clocksource-tclib-allow-higher-clockrates.patch so I
> would think fixing clocksource-tclib-allow-higher-clockrates.patch is
> preferable than having a separate patch.
> 
> But maybe you meant you wanted a patch to fix
> clocksource-tclib-allow-higher-clockrates.patch

got it. So clocksource-tclib-allow-higher-clockrates.patch becomes:

--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -424,11 +424,18 @@ config ATMEL_ST
 
 config ATMEL_TCB_CLKSRC
 	bool "Atmel TC Block timer driver" if COMPILE_TEST
-	depends on HAS_IOMEM
+	depends on HAS_IOMEM && ATMEL_TCLIB
 	select TIMER_OF if OF
 	help
 	  Support for Timer Counter Blocks on Atmel SoCs.
 
+config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+	bool "TC Block use 32 KiHz clock"
+	depends on ATMEL_TCB_CLKSRC
+	default y
+	help
+	  Select this to use 32 KiHz base clock rate as TC block clock.
+
 config CLKSRC_EXYNOS_MCT
 	bool "Exynos multi core timer driver" if COMPILE_TEST
 	depends on ARM || ARM64
--- a/drivers/clocksource/timer-atmel-tcb.c
+++ b/drivers/clocksource/timer-atmel-tcb.c
@@ -27,8 +27,7 @@
  *     this 32 bit free-running counter. the second channel is not used.
  *
  *   - The third channel may be used to provide a 16-bit clockevent
- *     source, used in either periodic or oneshot mode.  This runs
- *     at 32 KiHZ, and can handle delays of up to two seconds.
+ *     source, used in either periodic or oneshot mode.
  *
  * REVISIT behavior during system suspend states... we should disable
  * all clocks and save the power.  Easily done for clockevent devices,
@@ -131,6 +130,7 @@ struct tc_clkevt_device {
 	struct clock_event_device	clkevt;
 	struct clk			*clk;
 	bool				clk_enabled;
+	u32				freq;
 	void __iomem			*regs;
 };
 
@@ -139,13 +139,6 @@ static struct tc_clkevt_device *to_tc_cl
 	return container_of(clkevt, struct tc_clkevt_device, clkevt);
 }
 
-/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
- * because using one of the divided clocks would usually mean the
- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
- *
- * A divided clock could be good for high resolution timers, since
- * 30.5 usec resolution can seem "low".
- */
 static u32 timer_clock;
 
 static void tc_clk_disable(struct clock_event_device *d)
@@ -195,7 +188,7 @@ static int tc_set_oneshot(struct clock_e
 
 	tc_clk_enable(d);
 
-	/* slow clock, count up to RC, then irq and stop */
+	/* count up to RC, then irq and stop */
 	writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
 		     ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
 	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
@@ -217,10 +210,10 @@ static int tc_set_periodic(struct clock_
 	 */
 	tc_clk_enable(d);
 
-	/* slow clock, count up to RC, then irq and restart */
+	/* count up to RC, then irq and restart */
 	writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
 		     regs + ATMEL_TC_REG(2, CMR));
-	writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
+	writel((tcd->freq + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
 
 	/* Enable clock and interrupts on RC compare */
 	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
@@ -246,7 +239,11 @@ static struct tc_clkevt_device clkevt =
 		.features		= CLOCK_EVT_FEAT_PERIODIC |
 					  CLOCK_EVT_FEAT_ONESHOT,
 		/* Should be lower than at91rm9200's system timer */
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
 		.rating			= 125,
+#else
+		.rating			= 200,
+#endif
 		.set_next_event		= tc_next_event,
 		.set_state_shutdown	= tc_shutdown_clk_off,
 		.set_state_periodic	= tc_set_periodic,
@@ -268,8 +265,9 @@ static irqreturn_t ch2_irq(int irq, void
 	return IRQ_NONE;
 }
 
-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
 {
+	unsigned divisor = atmel_tc_divisors[divisor_idx];
 	int ret;
 	struct clk *t2_clk = tc->clk[2];
 	int irq = tc->irq[2];
@@ -290,7 +288,11 @@ static int __init setup_clkevents(struct
 	clkevt.regs = tc->regs;
 	clkevt.clk = t2_clk;
 
-	timer_clock = clk32k_divisor_idx;
+	timer_clock = divisor_idx;
+	if (!divisor)
+		clkevt.freq = 32768;
+	else
+		clkevt.freq = clk_get_rate(t2_clk) / divisor;
 
 	clkevt.clkevt.cpumask = cpumask_of(0);
 
@@ -301,7 +303,7 @@ static int __init setup_clkevents(struct
 		return ret;
 	}
 
-	clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
+	clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff);
 
 	return ret;
 }
@@ -477,7 +479,11 @@ static int __init tcb_clksrc_init(struct
 		goto err_disable_t1;
 
 	/* channel 2:  periodic and oneshot timer support */
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
 	ret = setup_clkevents(&tc, clk32k_divisor_idx);
+#else
+	ret = setup_clkevents(&tc, best_divisor_idx);
+#endif
 	if (ret)
 		goto err_unregister_clksrc;
 


> 
> Hopefully, one day we will have a solution for that upstream (i.e. being
> able to configure the clocksource and clockevent resolutions).

Hopefully.

Sebastian


* Re: [ANNOUNCE] v5.2.9-rt3
  2019-08-21 15:15             ` Sebastian Andrzej Siewior
@ 2019-08-21 16:19               ` Alexandre Belloni
  0 siblings, 0 replies; 9+ messages in thread
From: Alexandre Belloni @ 2019-08-21 16:19 UTC (permalink / raw)
  To: Sebastian Andrzej Siewior
  Cc: Alexander Dahl, linux-rt-users, Thomas Gleixner, LKML, Steven Rostedt

[-- Attachment #1: Type: text/plain, Size: 1436 bytes --]

On 21/08/2019 17:15:30+0200, Sebastian Andrzej Siewior wrote:
> On 2019-08-21 16:58:37 [+0200], Alexandre Belloni wrote:
> > On 21/08/2019 16:42:30+0200, Sebastian Andrzej Siewior wrote:
> > > On 2019-08-21 16:21:10 [+0200], Alexandre Belloni wrote:
> > > > I'm not sure it is worth it as the issue is introduced by
> > > > clocksource-tclib-allow-higher-clockrates.patch. Shouldn't we fix it
> > > > directly?
> > > 
> > > you want to get rid of CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK and use
> > > the highest possible frequency by default? 
> > > 
> > 
> > No, I meant the issue fixed by clocksource-tclib-add-proper-depend.patch
> > is introduced by clocksource-tclib-allow-higher-clockrates.patch so I
> > would think fixing clocksource-tclib-allow-higher-clockrates.patch is
> > preferable than having a separate patch.
> > 
> > But maybe you meant you wanted a patch to fix
> > clocksource-tclib-allow-higher-clockrates.patch
> 
> got it. So clocksource-tclib-allow-higher-clockrates.patch becomes:
> 
> --- a/drivers/clocksource/Kconfig
> +++ b/drivers/clocksource/Kconfig
> @@ -424,11 +424,18 @@ config ATMEL_ST
>  
>  config ATMEL_TCB_CLKSRC
>  	bool "Atmel TC Block timer driver" if COMPILE_TEST
> -	depends on HAS_IOMEM
> +	depends on HAS_IOMEM && ATMEL_TCLIB

Nope, this dependency is not necessary; please find the patch attached.


-- 
Alexandre Belloni, Bootlin
Embedded Linux and Kernel engineering
https://bootlin.com

[-- Attachment #2: 0001-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch --]
[-- Type: text/plain, Size: 6080 bytes --]

From c74e8f0fe04117ab53a38bbc0304d05525f9f0de Mon Sep 17 00:00:00 2001
From: Benedikt Spranger <b.spranger@linutronix.de>
Date: Mon, 8 Mar 2010 18:57:04 +0100
Subject: [PATCH] clocksource: TCLIB: Allow higher clock rates for clock events
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

By default the TCLIB uses the 32 KiHz base clock rate for clock events.
Add a compile-time selection to allow a higher clock resolution.

(fixed up by Sami Pietikäinen <Sami.Pietikainen@wapice.com>)

Signed-off-by: Benedikt Spranger <b.spranger@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 drivers/clocksource/Kconfig           |  7 +++++
 drivers/clocksource/timer-atmel-tcb.c | 40 +++++++++++++++------------
 2 files changed, 30 insertions(+), 17 deletions(-)

diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 3300739edce4..2927b673caa6 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -429,6 +429,13 @@ config ATMEL_TCB_CLKSRC
 	help
 	  Support for Timer Counter Blocks on Atmel SoCs.
 
+config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+	bool "TC Block use 32 KiHz clock"
+	depends on ATMEL_TCB_CLKSRC
+	default y
+	help
+	  Select this to use 32 KiHz base clock rate as TC block clock.
+
 config CLKSRC_EXYNOS_MCT
 	bool "Exynos multi core timer driver" if COMPILE_TEST
 	depends on ARM || ARM64
diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c
index 8bc83c346bad..1a5abc178b65 100644
--- a/drivers/clocksource/timer-atmel-tcb.c
+++ b/drivers/clocksource/timer-atmel-tcb.c
@@ -27,8 +27,7 @@
  *     this 32 bit free-running counter. the second channel is not used.
  *
  *   - The third channel may be used to provide a 16-bit clockevent
- *     source, used in either periodic or oneshot mode.  This runs
- *     at 32 KiHZ, and can handle delays of up to two seconds.
+ *     source, used in either periodic or oneshot mode.
  *
  * REVISIT behavior during system suspend states... we should disable
  * all clocks and save the power.  Easily done for clockevent devices,
@@ -131,6 +130,7 @@ struct tc_clkevt_device {
 	struct clock_event_device	clkevt;
 	struct clk			*clk;
 	bool				clk_enabled;
+	u32				freq;
 	void __iomem			*regs;
 };
 
@@ -139,13 +139,6 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
 	return container_of(clkevt, struct tc_clkevt_device, clkevt);
 }
 
-/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
- * because using one of the divided clocks would usually mean the
- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
- *
- * A divided clock could be good for high resolution timers, since
- * 30.5 usec resolution can seem "low".
- */
 static u32 timer_clock;
 
 static void tc_clk_disable(struct clock_event_device *d)
@@ -195,7 +188,7 @@ static int tc_set_oneshot(struct clock_event_device *d)
 
 	tc_clk_enable(d);
 
-	/* slow clock, count up to RC, then irq and stop */
+	/* count up to RC, then irq and stop */
 	writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
 		     ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
 	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
@@ -217,10 +210,10 @@ static int tc_set_periodic(struct clock_event_device *d)
 	 */
 	tc_clk_enable(d);
 
-	/* slow clock, count up to RC, then irq and restart */
+	/* count up to RC, then irq and restart */
 	writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
 		     regs + ATMEL_TC_REG(2, CMR));
-	writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
+	writel((tcd->freq + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
 
 	/* Enable clock and interrupts on RC compare */
 	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
@@ -246,7 +239,11 @@ static struct tc_clkevt_device clkevt = {
 		.features		= CLOCK_EVT_FEAT_PERIODIC |
 					  CLOCK_EVT_FEAT_ONESHOT,
 		/* Should be lower than at91rm9200's system timer */
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
 		.rating			= 125,
+#else
+		.rating			= 200,
+#endif
 		.set_next_event		= tc_next_event,
 		.set_state_shutdown	= tc_shutdown_clk_off,
 		.set_state_periodic	= tc_set_periodic,
@@ -268,8 +265,11 @@ static irqreturn_t ch2_irq(int irq, void *handle)
 	return IRQ_NONE;
 }
 
-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static const u8 atmel_tcb_divisors[5] = { 2, 8, 32, 128, 0, };
+
+static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
 {
+	unsigned divisor = atmel_tcb_divisors[divisor_idx];
 	int ret;
 	struct clk *t2_clk = tc->clk[2];
 	int irq = tc->irq[2];
@@ -290,7 +290,11 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
 	clkevt.regs = tc->regs;
 	clkevt.clk = t2_clk;
 
-	timer_clock = clk32k_divisor_idx;
+	timer_clock = divisor_idx;
+	if (!divisor)
+		clkevt.freq = 32768;
+	else
+		clkevt.freq = clk_get_rate(t2_clk) / divisor;
 
 	clkevt.clkevt.cpumask = cpumask_of(0);
 
@@ -301,7 +305,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
 		return ret;
 	}
 
-	clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
+	clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff);
 
 	return ret;
 }
@@ -358,8 +362,6 @@ static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_id
 	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
 }
 
-static const u8 atmel_tcb_divisors[5] = { 2, 8, 32, 128, 0, };
-
 static const struct of_device_id atmel_tcb_of_match[] = {
 	{ .compatible = "atmel,at91rm9200-tcb", .data = (void *)16, },
 	{ .compatible = "atmel,at91sam9x5-tcb", .data = (void *)32, },
@@ -477,7 +479,11 @@ static int __init tcb_clksrc_init(struct device_node *node)
 		goto err_disable_t1;
 
 	/* channel 2:  periodic and oneshot timer support */
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
 	ret = setup_clkevents(&tc, clk32k_divisor_idx);
+#else
+	ret = setup_clkevents(&tc, best_divisor_idx);
+#endif
 	if (ret)
 		goto err_unregister_clksrc;
 
-- 
2.21.0



