Diffstat (limited to 'lib/flex_proportions.c')
-rw-r--r--  lib/flex_proportions.c  45
1 file changed, 25 insertions(+), 20 deletions(-)
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index 7852bfff50b1..83332fefa6f4 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -63,18 +63,14 @@ void fprop_global_destroy(struct fprop_global *p)
*/
bool fprop_new_period(struct fprop_global *p, int periods)
{
- s64 events;
- unsigned long flags;
+ s64 events = percpu_counter_sum(&p->events);
- local_irq_save(flags);
- events = percpu_counter_sum(&p->events);
/*
* Don't do anything if there are no events.
*/
- if (events <= 1) {
- local_irq_restore(flags);
+ if (events <= 1)
return false;
- }
+ preempt_disable_nested();
write_seqcount_begin(&p->sequence);
if (periods < 64)
events -= events >> periods;
@@ -82,7 +78,7 @@ bool fprop_new_period(struct fprop_global *p, int periods)
percpu_counter_add(&p->events, -events);
p->period += periods;
write_seqcount_end(&p->sequence);
- local_irq_restore(flags);
+ preempt_enable_nested();
return true;
}
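
The hunks above replace the local_irq_save()/local_irq_restore() pair around the seqcount write section in fprop_new_period() with preempt_disable_nested()/preempt_enable_nested(), and fold the percpu_counter_sum() call into the declaration of events. The aging arithmetic itself is untouched: each elapsed period halves the accumulated event count, and 64 or more periods clear it. A minimal user-space sketch of that arithmetic (plain C; age_events() is a hypothetical stand-in for the percpu_counter manipulation done inside the write section):

#include <stdint.h>
#include <stdio.h>

/* Toy model of the aging done by fprop_new_period(): the counter
 * keeps events >> periods, i.e. each period halves it, and
 * periods >= 64 clears it entirely. */
static int64_t age_events(int64_t events, int periods)
{
	int64_t delta = events;

	if (periods < 64)
		delta -= delta >> periods;
	/* the kernel code adds -delta to the percpu counter */
	return events - delta;
}

int main(void)
{
	printf("%lld\n", (long long)age_events(1000, 1));  /* 500 */
	printf("%lld\n", (long long)age_events(1000, 3));  /* 125 */
	printf("%lld\n", (long long)age_events(1000, 64)); /* 0 */
	return 0;
}
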
@@ -217,11 +213,12 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
}
/* Event of type pl happened */
-void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
+void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
+ long nr)
{
fprop_reflect_period_percpu(p, pl);
- percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
- percpu_counter_add(&p->events, 1);
+ percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
+ percpu_counter_add(&p->events, nr);
}
void fprop_fraction_percpu(struct fprop_global *p,
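
With the rename above, __fprop_add_percpu() takes a long nr instead of always counting a single event; it still reflects the period first and then adds nr to both the local and the global percpu counter. Assuming the matching declaration change lives in include/linux/flex_proportions.h (not shown in this diff), existing single-event callers could be kept working by a thin wrapper along these lines (a sketch, not necessarily the form the header actually uses):

/* Hypothetical single-event wrapper on top of the new helper. */
static inline void __fprop_inc_percpu(struct fprop_global *p,
				      struct fprop_local_percpu *pl)
{
	__fprop_add_percpu(p, pl, 1);
}
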
@@ -253,21 +250,29 @@ void fprop_fraction_percpu(struct fprop_global *p,
}
/*
- * Like __fprop_inc_percpu() except that event is counted only if the given
+ * Like __fprop_add_percpu() except that event is counted only if the given
* type has fraction smaller than @max_frac/FPROP_FRAC_BASE
*/
-void __fprop_inc_percpu_max(struct fprop_global *p,
- struct fprop_local_percpu *pl, int max_frac)
+void __fprop_add_percpu_max(struct fprop_global *p,
+ struct fprop_local_percpu *pl, int max_frac, long nr)
{
if (unlikely(max_frac < FPROP_FRAC_BASE)) {
unsigned long numerator, denominator;
+ s64 tmp;
fprop_fraction_percpu(p, pl, &numerator, &denominator);
- if (numerator >
- (((u64)denominator) * max_frac) >> FPROP_FRAC_SHIFT)
+ /* Adding 'nr' to fraction exceeds max_frac/FPROP_FRAC_BASE? */
+ tmp = (u64)denominator * max_frac -
+ ((u64)numerator << FPROP_FRAC_SHIFT);
+ if (tmp < 0) {
+ /* Maximum fraction already exceeded? */
return;
- } else
- fprop_reflect_period_percpu(p, pl);
- percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
- percpu_counter_add(&p->events, 1);
+ } else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
+ /* Add just enough for the fraction to saturate */
+ nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
+ FPROP_FRAC_BASE - max_frac);
+ }
+ }
+
+ __fprop_add_percpu(p, pl, nr);
}
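
The new clamping logic in __fprop_add_percpu_max() works on the headroom left before the tracked type reaches the limit: tmp = denominator * max_frac - (numerator << FPROP_FRAC_SHIFT). Since an event of type pl raises both numerator and denominator, requiring (numerator + nr) / (denominator + nr) <= max_frac / FPROP_FRAC_BASE rearranges to nr * (FPROP_FRAC_BASE - max_frac) <= tmp, so when nr would overshoot it is rounded up to ceil(tmp / (FPROP_FRAC_BASE - max_frac)), just enough to saturate the fraction. A self-contained sketch of that arithmetic (plain C; clamp_nr() and the FPROP_FRAC_* values are assumed here for illustration, see include/linux/flex_proportions.h for the real definitions):

#include <stdint.h>
#include <stdio.h>

/* Assumed values; the kernel defines these in flex_proportions.h. */
#define FPROP_FRAC_SHIFT	10
#define FPROP_FRAC_BASE		(1L << FPROP_FRAC_SHIFT)

/* Toy model of the clamping in __fprop_add_percpu_max(). Returning
 * 0 stands in for the early return taken when the fraction already
 * exceeds max_frac. */
static long clamp_nr(unsigned long numerator, unsigned long denominator,
		     int max_frac, long nr)
{
	int64_t tmp = (int64_t)denominator * max_frac -
			((int64_t)numerator << FPROP_FRAC_SHIFT);

	if (tmp < 0)
		return 0;
	if (tmp < nr * (FPROP_FRAC_BASE - max_frac))
		nr = (tmp + FPROP_FRAC_BASE - max_frac - 1) /
			(FPROP_FRAC_BASE - max_frac);
	return nr;
}

int main(void)
{
	/* Fraction is 300/1024 and the cap is 512/1024: adding 1000
	 * events is clamped to 424, since (300+424)/(1024+424) == 1/2. */
	printf("%ld\n", clamp_nr(300, 1024, 512, 1000));
	return 0;
}
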