author      Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-08 03:43:31 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-08 03:43:31 +0000
commit      1327913aeadf5737e7ebf158f93632d6df2eba1d (patch)
tree        2ca5c2a3aa381788eca2580f72a2d5c60934b28a /kernel
parent      Adding upstream version 4.19.260. (diff)
download    linux-1327913aeadf5737e7ebf158f93632d6df2eba1d.tar.xz
            linux-1327913aeadf5737e7ebf158f93632d6df2eba1d.zip
Adding upstream version 4.19.269. (tag: upstream/4.19.269)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/bpf/btf.c                 |  2
-rw-r--r--    kernel/bpf/syscall.c             |  2
-rw-r--r--    kernel/cgroup/cgroup-internal.h  |  1
-rw-r--r--    kernel/gcov/gcc_4_7.c            | 18
-rw-r--r--    kernel/kprobes.c                 |  5
-rw-r--r--    kernel/livepatch/transition.c    | 18
-rw-r--r--    kernel/power/hibernate.c         |  2
-rw-r--r--    kernel/sysctl.c                  | 30
-rw-r--r--    kernel/trace/ftrace.c            | 13
-rw-r--r--    kernel/trace/ring_buffer.c       | 50
10 files changed, 110 insertions(+), 31 deletions(-)
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 471cc5c11..62e05b628 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -1660,7 +1660,7 @@ static int btf_struct_resolve(struct btf_verifier_env *env,
 	if (v->next_member) {
 		const struct btf_type *last_member_type;
 		const struct btf_member *last_member;
-		u16 last_member_type_id;
+		u32 last_member_type_id;
 
 		last_member = btf_type_member(v->t) + v->next_member - 1;
 		last_member_type_id = last_member->type;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index e940c1f65..02e5bdb82 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2325,7 +2325,9 @@ static int bpf_task_fd_query(const union bpf_attr *attr,
 	if (attr->task_fd_query.flags != 0)
 		return -EINVAL;
 
+	rcu_read_lock();
 	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
+	rcu_read_unlock();
 	if (!task)
 		return -ENOENT;
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index 8f7729b0a..4168e7d97 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -148,7 +148,6 @@ extern struct mutex cgroup_mutex;
 extern spinlock_t css_set_lock;
 extern struct cgroup_subsys *cgroup_subsys[];
 extern struct list_head cgroup_roots;
-extern struct file_system_type cgroup_fs_type;
 
 /* iterate across the hierarchies */
 #define for_each_root(root)						\
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index 5b9e76117..bb6ccf456 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -33,6 +33,13 @@
 
 #define GCOV_TAG_FUNCTION_LENGTH	3
 
+/* Since GCC 12.1 sizes are in BYTES and not in WORDS (4B). */
+#if (__GNUC__ >= 12)
+#define GCOV_UNIT_SIZE	4
+#else
+#define GCOV_UNIT_SIZE	1
+#endif
+
 static struct gcov_info *gcov_info_head;
 
 /**
@@ -439,12 +446,18 @@ static size_t convert_to_gcda(char *buffer, struct gcov_info *info)
 	pos += store_gcov_u32(buffer, pos, info->version);
 	pos += store_gcov_u32(buffer, pos, info->stamp);
 
+#if (__GNUC__ >= 12)
+	/* Use zero as checksum of the compilation unit. */
+	pos += store_gcov_u32(buffer, pos, 0);
+#endif
+
 	for (fi_idx = 0; fi_idx < info->n_functions; fi_idx++) {
 		fi_ptr = info->functions[fi_idx];
 
 		/* Function record. */
 		pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION);
-		pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION_LENGTH);
+		pos += store_gcov_u32(buffer, pos,
+				      GCOV_TAG_FUNCTION_LENGTH * GCOV_UNIT_SIZE);
 		pos += store_gcov_u32(buffer, pos, fi_ptr->ident);
 		pos += store_gcov_u32(buffer, pos, fi_ptr->lineno_checksum);
 		pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum);
@@ -458,7 +471,8 @@ static size_t convert_to_gcda(char *buffer, struct gcov_info *info)
 
 			/* Counter record. */
 			pos += store_gcov_u32(buffer, pos, GCOV_TAG_FOR_COUNTER(ct_idx));
-			pos += store_gcov_u32(buffer, pos, ci_ptr->num * 2);
+			pos += store_gcov_u32(buffer, pos,
+					      ci_ptr->num * 2 * GCOV_UNIT_SIZE);
 
 			for (cv_idx = 0; cv_idx < ci_ptr->num; cv_idx++) {
 				pos += store_gcov_u64(buffer, pos,
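The gcov hunks above exist because GCC 12.1 switched the .gcda record length fields from 4-byte words to bytes, so every stored length must be scaled by GCOV_UNIT_SIZE. A minimal standalone sketch of that arithmetic (plain userspace C, not kernel code; the macro names mirror the patch and the counter count is an invented demo value):

/*
 * Sketch of the .gcda length arithmetic: from GCC 12 on, lengths are
 * counted in bytes rather than 4-byte words, so GCOV_UNIT_SIZE is 4.
 * Userspace demo only; 'num' is a made-up counter count.
 */
#include <stdio.h>

#define GCOV_TAG_FUNCTION_LENGTH 3

#if (__GNUC__ >= 12)
#define GCOV_UNIT_SIZE 4
#else
#define GCOV_UNIT_SIZE 1
#endif

int main(void)
{
    unsigned int num = 8;   /* hypothetical number of counters */

    /* Length field of a function record, as stored by convert_to_gcda(). */
    printf("function record length: %d\n",
           GCOV_TAG_FUNCTION_LENGTH * GCOV_UNIT_SIZE);

    /* Length field of a counter record: each counter is two u32 words. */
    printf("counter record length: %u\n", num * 2 * GCOV_UNIT_SIZE);
    return 0;
}

Built with GCC 12 or later this prints 12 and 64; with older compilers, 3 and 16, matching the values the patched convert_to_gcda() writes.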
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index b2f01e61f..33aba4e2e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2172,8 +2172,11 @@ int enable_kprobe(struct kprobe *kp)
 	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
 		p->flags &= ~KPROBE_FLAG_DISABLED;
 		ret = arm_kprobe(p);
-		if (ret)
+		if (ret) {
 			p->flags |= KPROBE_FLAG_DISABLED;
+			if (p != kp)
+				kp->flags |= KPROBE_FLAG_DISABLED;
+		}
 	}
 out:
 	mutex_unlock(&kprobe_mutex);
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index 5bc349805..20277ad6b 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -563,9 +563,23 @@ void klp_reverse_transition(void)
 /* Called from copy_process() during fork */
 void klp_copy_process(struct task_struct *child)
 {
-	child->patch_state = current->patch_state;
-
-	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
+	/*
+	 * The parent process may have gone through a KLP transition since
+	 * the thread flag was copied in setup_thread_stack earlier. Bring
+	 * the task flag up to date with the parent here.
+	 *
+	 * The operation is serialized against all klp_*_transition()
+	 * operations by the tasklist_lock. The only exception is
+	 * klp_update_patch_state(current), but we cannot race with
+	 * that because we are current.
+	 */
+	if (test_tsk_thread_flag(current, TIF_PATCH_PENDING))
+		set_tsk_thread_flag(child, TIF_PATCH_PENDING);
+	else
+		clear_tsk_thread_flag(child, TIF_PATCH_PENDING);
+
+	child->patch_state = current->patch_state;
 }
 
 /*
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 6abdfdf57..6737ae6ff 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -626,7 +626,7 @@ static void power_down(void)
 	int error;
 
 	if (hibernation_mode == HIBERNATION_SUSPEND) {
-		error = suspend_devices_and_enter(PM_SUSPEND_MEM);
+		error = suspend_devices_and_enter(mem_sleep_current);
 		if (error) {
 			hibernation_mode = hibernation_ops ?
 						HIBERNATION_PLATFORM :
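In the kprobes hunk above, kp may be attached to an aggregated probe p, so a failed arm_kprobe(p) previously left the caller's kp looking enabled while the aggregate was re-marked disabled. A standalone sketch of the propagation the fix adds (plain C; arm_probe() and its forced failure are hypothetical stand-ins, not the kernel API):

/*
 * Demo of the enable_kprobe() fix: on arm failure, the disabled flag
 * must be set on both the aggregate probe and the caller's probe.
 * All names here are stand-ins for the demo.
 */
#include <stdio.h>

#define KPROBE_FLAG_DISABLED 1

struct kprobe { unsigned int flags; };

static int arm_probe(struct kprobe *p)
{
    (void)p;
    return -1;  /* simulate arming failure */
}

static void enable(struct kprobe *agg, struct kprobe *kp)
{
    agg->flags &= ~KPROBE_FLAG_DISABLED;
    if (arm_probe(agg)) {
        agg->flags |= KPROBE_FLAG_DISABLED;
        if (agg != kp)  /* the fix: propagate to the caller's probe */
            kp->flags |= KPROBE_FLAG_DISABLED;
    }
}

int main(void)
{
    struct kprobe agg = { KPROBE_FLAG_DISABLED };
    struct kprobe kp  = { KPROBE_FLAG_DISABLED };

    enable(&agg, &kp);
    printf("kp disabled: %u\n", kp.flags & KPROBE_FLAG_DISABLED);
    return 0;
}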
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 93a7bd418..4bb194f09 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2081,13 +2081,14 @@ int proc_dostring(struct ctl_table *table, int write,
 			       (char __user *)buffer, lenp, ppos);
 }
 
-static size_t proc_skip_spaces(char **buf)
+static void proc_skip_spaces(char **buf, size_t *size)
 {
-	size_t ret;
-	char *tmp = skip_spaces(*buf);
-	ret = tmp - *buf;
-	*buf = tmp;
-	return ret;
+	while (*size) {
+		if (!isspace(**buf))
+			break;
+		(*size)--;
+		(*buf)++;
+	}
 }
 
 static void proc_skip_char(char **buf, size_t *size, const char v)
@@ -2156,13 +2157,12 @@ static int proc_get_long(char **buf, size_t *size,
 			  unsigned long *val, bool *neg,
 			  const char *perm_tr, unsigned perm_tr_len, char *tr)
 {
-	int len;
 	char *p, tmp[TMPBUFLEN];
+	ssize_t len = *size;
 
-	if (!*size)
+	if (len <= 0)
 		return -EINVAL;
 
-	len = *size;
 	if (len > TMPBUFLEN - 1)
 		len = TMPBUFLEN - 1;
 
@@ -2325,7 +2325,7 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
 		bool neg;
 
 		if (write) {
-			left -= proc_skip_spaces(&p);
+			proc_skip_spaces(&p, &left);
 
 			if (!left)
 				break;
@@ -2356,7 +2356,7 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
 	if (!write && !first && left && !err)
 		err = proc_put_char(&buffer, &left, '\n');
 	if (write && !err && left)
-		left -= proc_skip_spaces(&p);
+		proc_skip_spaces(&p, &left);
 	if (write) {
 		kfree(kbuf);
 		if (first)
@@ -2405,7 +2405,7 @@ static int do_proc_douintvec_w(unsigned int *tbl_data,
 	if (IS_ERR(kbuf))
 		return -EINVAL;
 
-	left -= proc_skip_spaces(&p);
+	proc_skip_spaces(&p, &left);
 	if (!left) {
 		err = -EINVAL;
 		goto out_free;
@@ -2425,7 +2425,7 @@ static int do_proc_douintvec_w(unsigned int *tbl_data,
 	}
 
 	if (!err && left)
-		left -= proc_skip_spaces(&p);
+		proc_skip_spaces(&p, &left);
 
 out_free:
 	kfree(kbuf);
@@ -2846,7 +2846,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
 	if (write) {
 		bool neg;
 
-		left -= proc_skip_spaces(&p);
+		proc_skip_spaces(&p, &left);
 		if (!left)
 			break;
 
@@ -2879,7 +2879,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
 	if (!write && !first && left && !err)
 		err = proc_put_char(&buffer, &left, '\n');
 	if (write && !err)
-		left -= proc_skip_spaces(&p);
+		proc_skip_spaces(&p, &left);
 	if (write) {
 		kfree(kbuf);
 		if (first)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d2272fff2..9c7795566 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1331,6 +1331,7 @@ static int ftrace_add_mod(struct trace_array *tr,
 	if (!ftrace_mod)
 		return -ENOMEM;
 
+	INIT_LIST_HEAD(&ftrace_mod->list);
 	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
 	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
 	ftrace_mod->enable = enable;
@@ -3033,7 +3034,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
 		/* if we can't allocate this size, try something smaller */
 		if (!order)
 			return -ENOMEM;
-		order >>= 1;
+		order--;
 		goto again;
 	}
 
@@ -5054,8 +5055,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
 		if (filter_hash) {
 			orig_hash = &iter->ops->func_hash->filter_hash;
-			if (iter->tr && !list_empty(&iter->tr->mod_trace))
-				iter->hash->flags |= FTRACE_HASH_FL_MOD;
+			if (iter->tr) {
+				if (list_empty(&iter->tr->mod_trace))
+					iter->hash->flags &= ~FTRACE_HASH_FL_MOD;
+				else
+					iter->hash->flags |= FTRACE_HASH_FL_MOD;
+			}
 		} else
 			orig_hash = &iter->ops->func_hash->notrace_hash;
 
@@ -6225,7 +6230,7 @@ void __init ftrace_init(void)
 	}
 
 	pr_info("ftrace: allocating %ld entries in %ld pages\n",
-		count, count / ENTRIES_PER_PAGE + 1);
+		count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
 
 	last_ftrace_enabled = ftrace_enabled = 1;
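The sysctl change matters because the proc write buffer is not guaranteed to be NUL-terminated: the old proc_skip_spaces() called skip_spaces(), which walks until a NUL and could read past the end of the buffer. A userspace sketch of the bounded variant (the demo harness around it is assumed, not kernel code):

/*
 * Demo of the bounded whitespace skip: the scan is limited by the
 * remaining size, never by a terminating NUL. The 4-byte buffer below
 * deliberately has no NUL terminator.
 */
#include <ctype.h>
#include <stddef.h>
#include <stdio.h>

static void proc_skip_spaces(char **buf, size_t *size)
{
    /* Stop at the size limit, not at a (possibly missing) NUL. */
    while (*size) {
        if (!isspace((unsigned char)**buf))
            break;
        (*size)--;
        (*buf)++;
    }
}

int main(void)
{
    char data[4] = { ' ', ' ', '4', '2' };  /* no terminating NUL */
    char *p = data;
    size_t left = sizeof(data);

    proc_skip_spaces(&p, &left);
    printf("%zu bytes left, next char '%c'\n", left, *p);
    return 0;
}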
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 987d3447b..5e5b0c067 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -542,8 +542,9 @@ static void rb_wake_up_waiters(struct irq_work *work)
 	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
 
 	wake_up_all(&rbwork->waiters);
-	if (rbwork->wakeup_full) {
+	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
 		rbwork->wakeup_full = false;
+		rbwork->full_waiters_pending = false;
 		wake_up_all(&rbwork->full_waiters);
 	}
 }
@@ -1327,9 +1328,9 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 
 	free_buffer_page(cpu_buffer->reader_page);
 
-	rb_head_page_deactivate(cpu_buffer);
-
 	if (head) {
+		rb_head_page_deactivate(cpu_buffer);
+
 		list_for_each_entry_safe(bpage, tmp, head, list) {
 			list_del_init(&bpage->list);
 			free_buffer_page(bpage);
@@ -2156,6 +2157,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 		/* Mark the rest of the page with padding */
 		rb_event_set_padding(event);
 
+		/* Make sure the padding is visible before the write update */
+		smp_wmb();
+
 		/* Set the write back to the previous setting */
 		local_sub(length, &tail_page->write);
 		return;
@@ -2167,6 +2171,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	/* time delta must be non zero */
 	event->time_delta = 1;
 
+	/* Make sure the padding is visible before the tail_page->write update */
+	smp_wmb();
+
 	/* Set write to end of buffer */
 	length = (tail + length) - BUF_PAGE_SIZE;
 	local_sub(length, &tail_page->write);
@@ -3812,6 +3819,33 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	arch_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);
 
+	/*
+	 * The writer has preempt disable, wait for it. But not forever
+	 * Although, 1 second is pretty much "forever"
+	 */
+#define USECS_WAIT	1000000
+	for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
+		/* If the write is past the end of page, a writer is still updating it */
+		if (likely(!reader || rb_page_write(reader) <= BUF_PAGE_SIZE))
+			break;
+
+		udelay(1);
+
+		/* Get the latest version of the reader write value */
+		smp_rmb();
+	}
+
+	/* The writer is not moving forward? Something is wrong */
+	if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT))
+		reader = NULL;
+
+	/*
+	 * Make sure we see any padding after the write update
+	 * (see rb_reset_tail())
+	 */
+	smp_rmb();
+
 	return reader;
 }
 
@@ -4770,7 +4804,15 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	unsigned int pos = 0;
 	unsigned int size;
 
-	if (full)
+	/*
+	 * If a full page is expected, this can still be returned
+	 * if there's been a previous partial read and the
+	 * rest of the page can be read and the commit page is off
+	 * the reader page.
+	 */
+	if (full &&
+	    (!read || (len < (commit - read)) ||
+	     cpu_buffer->reader_page == cpu_buffer->commit_page))
 		goto out_unlock;
 
 	if (len > (commit - read))
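The ring-buffer hunks pair an smp_wmb() on the writer side (the padding event is made visible before tail_page->write moves) with an smp_rmb() on the reader side (the write index is read before the page contents). A toy C11 model of that pairing, using release/acquire atomics in place of the kernel barriers (all names are invented for the demo; this is not the kernel ring buffer):

/*
 * Toy model of the barrier pairing: the writer publishes data, then
 * the index with release semantics; the reader loads the index with
 * acquire semantics before touching the data. Single-threaded here,
 * so it only illustrates the ordering, not a real race.
 */
#include <stdatomic.h>
#include <stdio.h>

static char page[64];
static atomic_uint write_idx;

/* Writer side, mirroring rb_reset_tail(): pad first, then publish. */
static void writer_reset_tail(unsigned int tail)
{
    page[tail] = 'P';   /* mark padding (the data write) */
    /* release store: like smp_wmb() + index update in the patch */
    atomic_store_explicit(&write_idx, tail, memory_order_release);
}

/* Reader side, mirroring rb_get_reader_page(): index first, then data. */
static void reader(void)
{
    /* acquire load: like reading the index followed by smp_rmb() */
    unsigned int w = atomic_load_explicit(&write_idx, memory_order_acquire);

    printf("write=%u, byte=%c\n", w, page[w]);
}

int main(void)
{
    writer_reset_tail(10);
    reader();
    return 0;
}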