Diffstat (limited to 'kernel')
-rw-r--r--   kernel/bpf/verifier.c         10
-rw-r--r--   kernel/cgroup/cpuset.c         2
-rw-r--r--   kernel/debug/kdb/kdb_io.c     99
-rw-r--r--   kernel/dma/map_benchmark.c     6
-rw-r--r--   kernel/irq/cpuhotplug.c       16
-rw-r--r--   kernel/sched/core.c            2
-rw-r--r--   kernel/sched/fair.c           53
-rw-r--r--   kernel/sched/topology.c        2
-rw-r--r--   kernel/softirq.c              12
-rw-r--r--   kernel/trace/ring_buffer.c     9
10 files changed, 128 insertions, 83 deletions
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 94d952967fbf..07ca1157f97c 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5568,7 +5568,8 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
enum bpf_attach_type eatype = env->prog->expected_attach_type;
enum bpf_prog_type type = resolve_prog_type(env->prog);
- if (func_id != BPF_FUNC_map_update_elem)
+ if (func_id != BPF_FUNC_map_update_elem &&
+ func_id != BPF_FUNC_map_delete_elem)
return false;
/* It's not possible to get access to a locked struct sock in these
@@ -5579,6 +5580,11 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
if (eatype == BPF_TRACE_ITER)
return true;
break;
+ case BPF_PROG_TYPE_SOCK_OPS:
+ /* map_update allowed only via dedicated helpers with event type checks */
+ if (func_id == BPF_FUNC_map_delete_elem)
+ return true;
+ break;
case BPF_PROG_TYPE_SOCKET_FILTER:
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
@@ -5666,7 +5672,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_MAP_TYPE_SOCKMAP:
if (func_id != BPF_FUNC_sk_redirect_map &&
func_id != BPF_FUNC_sock_map_update &&
- func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_map &&
func_id != BPF_FUNC_sk_select_reuseport &&
func_id != BPF_FUNC_map_lookup_elem &&
@@ -5676,7 +5681,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_MAP_TYPE_SOCKHASH:
if (func_id != BPF_FUNC_sk_redirect_hash &&
func_id != BPF_FUNC_sock_hash_update &&
- func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_hash &&
func_id != BPF_FUNC_sk_select_reuseport &&
func_id != BPF_FUNC_map_lookup_elem &&
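
For illustration only (not part of the series above): with the verifier now treating BPF_FUNC_map_delete_elem the same way as BPF_FUNC_map_update_elem for sockmap/sockhash, a sock_ops program may delete entries directly. The map name, key layout and callback chosen below are assumptions made for this sketch, not something taken from the patch.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Illustrative sockhash keyed by local port; the layout is an assumption. */
struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, 128);
	__type(key, __u32);
	__type(value, __u64);
} sock_hash SEC(".maps");

SEC("sockops")
int sockops_prog(struct bpf_sock_ops *skops)
{
	__u32 key = skops->local_port;

	/* Drop the socket from the map on a state change; this call is what
	 * the verifier change above newly permits for sock_ops programs. */
	if (skops->op == BPF_SOCK_OPS_STATE_CB)
		bpf_map_delete_elem(&sock_hash, &key);
	return 1;
}

char _license[] SEC("license") = "GPL";
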
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 6905079c15c2..82df5a07a869 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1938,7 +1938,7 @@ bool current_cpuset_is_being_rebound(void)
static int update_relax_domain_level(struct cpuset *cs, s64 val)
{
#ifdef CONFIG_SMP
- if (val < -1 || val >= sched_domain_level_max)
+ if (val < -1 || val > sched_domain_level_max + 1)
return -EINVAL;
#endif
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 6735ac36b718..a3b4b55d2e2e 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -172,6 +172,33 @@ char kdb_getchar(void)
unreachable();
}
+/**
+ * kdb_position_cursor() - Place cursor in the correct horizontal position
+ * @prompt: Nil-terminated string containing the prompt string
+ * @buffer: Nil-terminated string containing the entire command line
+ * @cp: Cursor position, pointer to the character in buffer where the cursor
+ * should be positioned.
+ *
+ * The cursor is positioned by sending a carriage-return and then printing
+ * the content of the line until we reach the correct cursor position.
+ *
+ * There is some additional fine detail here.
+ *
+ * Firstly, even though kdb_printf() will correctly format zero-width fields
+ * we want the second call to kdb_printf() to be conditional. That keeps things
+ * a little cleaner when LOGGING=1.
+ *
+ * Secondly, we can't combine everything into one call to kdb_printf() since
+ * that renders into a fixed length buffer and the combined print could result
+ * in unwanted truncation.
+ */
+static void kdb_position_cursor(char *prompt, char *buffer, char *cp)
+{
+ kdb_printf("\r%s", kdb_prompt_str);
+ if (cp > buffer)
+ kdb_printf("%.*s", (int)(cp - buffer), buffer);
+}
+
/*
* kdb_read
*
@@ -200,7 +227,6 @@ static char *kdb_read(char *buffer, size_t bufsize)
* and null byte */
char *lastchar;
char *p_tmp;
- char tmp;
static char tmpbuffer[CMD_BUFLEN];
int len = strlen(buffer);
int len_tmp;
@@ -237,12 +263,8 @@ poll_again:
}
*(--lastchar) = '\0';
--cp;
- kdb_printf("\b%s \r", cp);
- tmp = *cp;
- *cp = '\0';
- kdb_printf(kdb_prompt_str);
- kdb_printf("%s", buffer);
- *cp = tmp;
+ kdb_printf("\b%s ", cp);
+ kdb_position_cursor(kdb_prompt_str, buffer, cp);
}
break;
case 13: /* enter */
@@ -259,19 +281,14 @@ poll_again:
memcpy(tmpbuffer, cp+1, lastchar - cp - 1);
memcpy(cp, tmpbuffer, lastchar - cp - 1);
*(--lastchar) = '\0';
- kdb_printf("%s \r", cp);
- tmp = *cp;
- *cp = '\0';
- kdb_printf(kdb_prompt_str);
- kdb_printf("%s", buffer);
- *cp = tmp;
+ kdb_printf("%s ", cp);
+ kdb_position_cursor(kdb_prompt_str, buffer, cp);
}
break;
case 1: /* Home */
if (cp > buffer) {
- kdb_printf("\r");
- kdb_printf(kdb_prompt_str);
cp = buffer;
+ kdb_position_cursor(kdb_prompt_str, buffer, cp);
}
break;
case 5: /* End */
@@ -287,11 +304,10 @@ poll_again:
}
break;
case 14: /* Down */
- memset(tmpbuffer, ' ',
- strlen(kdb_prompt_str) + (lastchar-buffer));
- *(tmpbuffer+strlen(kdb_prompt_str) +
- (lastchar-buffer)) = '\0';
- kdb_printf("\r%s\r", tmpbuffer);
+ case 16: /* Up */
+ kdb_printf("\r%*c\r",
+ (int)(strlen(kdb_prompt_str) + (lastchar - buffer)),
+ ' ');
*lastchar = (char)key;
*(lastchar+1) = '\0';
return lastchar;
@@ -301,15 +317,6 @@ poll_again:
++cp;
}
break;
- case 16: /* Up */
- memset(tmpbuffer, ' ',
- strlen(kdb_prompt_str) + (lastchar-buffer));
- *(tmpbuffer+strlen(kdb_prompt_str) +
- (lastchar-buffer)) = '\0';
- kdb_printf("\r%s\r", tmpbuffer);
- *lastchar = (char)key;
- *(lastchar+1) = '\0';
- return lastchar;
case 9: /* Tab */
if (tab < 2)
++tab;
@@ -353,15 +360,25 @@ poll_again:
kdb_printf("\n");
kdb_printf(kdb_prompt_str);
kdb_printf("%s", buffer);
+ if (cp != lastchar)
+ kdb_position_cursor(kdb_prompt_str, buffer, cp);
} else if (tab != 2 && count > 0) {
- len_tmp = strlen(p_tmp);
- strncpy(p_tmp+len_tmp, cp, lastchar-cp+1);
- len_tmp = strlen(p_tmp);
- strncpy(cp, p_tmp+len, len_tmp-len + 1);
- len = len_tmp - len;
- kdb_printf("%s", cp);
- cp += len;
- lastchar += len;
+ /* How many new characters do we want from tmpbuffer? */
+ len_tmp = strlen(p_tmp) - len;
+ if (lastchar + len_tmp >= bufend)
+ len_tmp = bufend - lastchar;
+
+ if (len_tmp) {
+ /* + 1 ensures the '\0' is memmove'd */
+ memmove(cp+len_tmp, cp, (lastchar-cp) + 1);
+ memcpy(cp, p_tmp+len, len_tmp);
+ kdb_printf("%s", cp);
+ cp += len_tmp;
+ lastchar += len_tmp;
+ if (cp != lastchar)
+ kdb_position_cursor(kdb_prompt_str,
+ buffer, cp);
+ }
}
kdb_nextline = 1; /* reset output line number */
break;
@@ -372,13 +389,9 @@ poll_again:
memcpy(cp+1, tmpbuffer, lastchar - cp);
*++lastchar = '\0';
*cp = key;
- kdb_printf("%s\r", cp);
+ kdb_printf("%s", cp);
++cp;
- tmp = *cp;
- *cp = '\0';
- kdb_printf(kdb_prompt_str);
- kdb_printf("%s", buffer);
- *cp = tmp;
+ kdb_position_cursor(kdb_prompt_str, buffer, cp);
} else {
*++lastchar = '\0';
*cp++ = key;
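
As a standalone illustration of the repositioning technique used by the new kdb_position_cursor() helper (return to column zero, then re-print the prompt and the line up to the cursor), here is a hypothetical userspace sketch; the function name and its surroundings are inventions for this example, not kernel code.

#include <stdio.h>

/* Re-place the cursor without escape sequences: go back to column 0, then
 * print everything that should sit to the left of the cursor. */
static void position_cursor(const char *prompt, const char *buffer,
			    const char *cp)
{
	printf("\r%s", prompt);
	if (cp > buffer)
		printf("%.*s", (int)(cp - buffer), buffer);
	fflush(stdout);
}
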
diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
index 9b9af1bd6be3..b7f8bb7a1e5c 100644
--- a/kernel/dma/map_benchmark.c
+++ b/kernel/dma/map_benchmark.c
@@ -124,7 +124,6 @@ static int do_map_benchmark(struct map_benchmark_data *map)
struct task_struct **tsk;
int threads = map->bparam.threads;
int node = map->bparam.node;
- const cpumask_t *cpu_mask = cpumask_of_node(node);
u64 loops;
int ret = 0;
int i;
@@ -145,7 +144,7 @@ static int do_map_benchmark(struct map_benchmark_data *map)
}
if (node != NUMA_NO_NODE)
- kthread_bind_mask(tsk[i], cpu_mask);
+ kthread_bind_mask(tsk[i], cpumask_of_node(node));
}
/* clear the old value in the previous benchmark */
@@ -231,7 +230,8 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
}
if (map->bparam.node != NUMA_NO_NODE &&
- !node_possible(map->bparam.node)) {
+ (map->bparam.node < 0 || map->bparam.node >= MAX_NUMNODES ||
+ !node_possible(map->bparam.node))) {
pr_err("invalid numa node\n");
return -EINVAL;
}
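
The ioctl change above applies a common validation pattern: range-check an untrusted index before passing it to a predicate that assumes the index is valid (node_possible() indexes a node bitmap). A minimal sketch of that pattern; the helper name is an assumption of this example:

#include <linux/nodemask.h>
#include <linux/numa.h>

/* Hypothetical helper: accept NUMA_NO_NODE, otherwise require an in-range,
 * possible node id before it is ever used as a bitmap index. */
static bool bench_node_is_valid(int node)
{
	if (node == NUMA_NO_NODE)
		return true;
	return node >= 0 && node < MAX_NUMNODES && node_possible(node);
}
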
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 39a41c56ad4f..24fd9db84cfb 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -70,6 +70,14 @@ static bool migrate_one_irq(struct irq_desc *desc)
}
/*
+ * Complete an eventually pending irq move cleanup. If this
+ * interrupt was moved in hard irq context, then the vectors need
+ * to be cleaned up. It can't wait until this interrupt actually
+ * happens and this CPU was involved.
+ */
+ irq_force_complete_move(desc);
+
+ /*
* No move required, if:
* - Interrupt is per cpu
* - Interrupt is not started
@@ -88,14 +96,6 @@ static bool migrate_one_irq(struct irq_desc *desc)
}
/*
- * Complete an eventually pending irq move cleanup. If this
- * interrupt was moved in hard irq context, then the vectors need
- * to be cleaned up. It can't wait until this interrupt actually
- * happens and this CPU was involved.
- */
- irq_force_complete_move(desc);
-
- /*
* If there is a setaffinity pending, then try to reuse the pending
* mask, so the last change of the affinity does not get lost. If
* there is no move pending or the pending mask does not contain
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 25b8ea91168e..b43da6201b9a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10828,7 +10828,7 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of,
{
struct task_group *tg = css_tg(of_css(of));
u64 period = tg_get_cfs_period(tg);
- u64 burst = tg_get_cfs_burst(tg);
+ u64 burst = tg->cfs_bandwidth.burst;
u64 quota;
int ret;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4a1393405a6f..94fcd585eb7f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5746,21 +5746,41 @@ static inline unsigned long cpu_util(int cpu);
static inline bool cpu_overutilized(int cpu)
{
- unsigned long rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
- unsigned long rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
+ unsigned long rq_util_min, rq_util_max;
+
+ if (!sched_energy_enabled())
+ return false;
+
+ rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
+ rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
return !util_fits_cpu(cpu_util(cpu), rq_util_min, rq_util_max, cpu);
}
-static inline void update_overutilized_status(struct rq *rq)
+static inline void set_rd_overutilized_status(struct root_domain *rd,
+ unsigned int status)
{
- if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
- WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
- trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
- }
+ if (!sched_energy_enabled())
+ return;
+
+ WRITE_ONCE(rd->overutilized, status);
+ trace_sched_overutilized_tp(rd, !!status);
+}
+
+static inline void check_update_overutilized_status(struct rq *rq)
+{
+ /*
+ * overutilized field is used for load balancing decisions only
+ * if energy aware scheduler is being used
+ */
+ if (!sched_energy_enabled())
+ return;
+
+ if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu))
+ set_rd_overutilized_status(rq->rd, SG_OVERUTILIZED);
}
#else
-static inline void update_overutilized_status(struct rq *rq) { }
+static inline void check_update_overutilized_status(struct rq *rq) { }
#endif
/* Runqueue only has SCHED_IDLE tasks enqueued */
@@ -5868,7 +5888,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
* and the following generally works well enough in practice.
*/
if (!task_new)
- update_overutilized_status(rq);
+ check_update_overutilized_status(rq);
enqueue_throttle:
if (cfs_bandwidth_used()) {
@@ -9577,19 +9597,14 @@ next_group:
env->fbq_type = fbq_classify_group(&sds->busiest_stat);
if (!env->sd->parent) {
- struct root_domain *rd = env->dst_rq->rd;
-
/* update overload indicator if we are at root domain */
- WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);
+ WRITE_ONCE(env->dst_rq->rd->overload, sg_status & SG_OVERLOAD);
/* Update over-utilization (tipping point, U >= 0) indicator */
- WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED);
- trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED);
+ set_rd_overutilized_status(env->dst_rq->rd,
+ sg_status & SG_OVERUTILIZED);
} else if (sg_status & SG_OVERUTILIZED) {
- struct root_domain *rd = env->dst_rq->rd;
-
- WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
- trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
+ set_rd_overutilized_status(env->dst_rq->rd, SG_OVERUTILIZED);
}
update_idle_cpu_scan(env, sum_util);
@@ -11460,7 +11475,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
task_tick_numa(rq, curr);
update_misfit_status(curr, rq);
- update_overutilized_status(task_rq(curr));
+ check_update_overutilized_status(task_rq(curr));
task_tick_core(rq, curr);
}
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 4e8698e62f07..8c82ca3aa652 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1405,7 +1405,7 @@ static void set_domain_attribute(struct sched_domain *sd,
} else
request = attr->relax_domain_level;
- if (sd->level > request) {
+ if (sd->level >= request) {
/* Turn off idle balance on this domain: */
sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 41f470929e99..dc60f0c66a25 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -512,7 +512,7 @@ static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
-asmlinkage __visible void __softirq_entry __do_softirq(void)
+static void handle_softirqs(bool ksirqd)
{
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
unsigned long old_flags = current->flags;
@@ -567,8 +567,7 @@ restart:
pending >>= softirq_bit;
}
- if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
- __this_cpu_read(ksoftirqd) == current)
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && ksirqd)
rcu_softirq_qs();
local_irq_disable();
@@ -588,6 +587,11 @@ restart:
current_restore_flags(old_flags, PF_MEMALLOC);
}
+asmlinkage __visible void __softirq_entry __do_softirq(void)
+{
+ handle_softirqs(false);
+}
+
/**
* irq_enter_rcu - Enter an interrupt context with RCU watching
*/
@@ -918,7 +922,7 @@ static void run_ksoftirqd(unsigned int cpu)
* We can safely run softirq on inline stack, as we are not deep
* in the task stack here.
*/
- __do_softirq();
+ handle_softirqs(true);
ksoftirqd_run_end();
cond_resched();
return;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2ec1473146ca..f9f0c198cb43 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1568,6 +1568,11 @@ static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
*
* As a safety measure we check to make sure the data pages have not
* been corrupted.
+ *
+ * Callers of this function need to guarantee that the list of pages doesn't get
+ * modified during the check. In particular, if it's possible that the function
+ * is invoked with concurrent readers which can swap in a new reader page then
+ * the caller should take cpu_buffer->reader_lock.
*/
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
@@ -2289,8 +2294,12 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
*/
synchronize_rcu();
for_each_buffer_cpu(buffer, cpu) {
+ unsigned long flags;
+
cpu_buffer = buffer->buffers[cpu];
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
rb_check_pages(cpu_buffer);
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
atomic_dec(&buffer->record_disabled);
}