author    Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-06 01:02:30 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-06 01:02:30 +0000
commit    76cb841cb886eef6b3bee341a2266c76578724ad (patch)
tree      f5892e5ba6cc11949952a6ce4ecbe6d516d6ce58 /arch/mips/include/asm/smp.h
parent    Initial commit. (diff)
Adding upstream version 4.19.249. (upstream/4.19.249)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/mips/include/asm/smp.h')
-rw-r--r--  arch/mips/include/asm/smp.h  |  122
1 file changed, 122 insertions(+), 0 deletions(-)
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
new file mode 100644
index 000000000..056a6bf13
--- /dev/null
+++ b/arch/mips/include/asm/smp.h
@@ -0,0 +1,122 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
+ * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
+ * Copyright (C) 2000, 2001, 2002 Ralf Baechle
+ * Copyright (C) 2000, 2001 Broadcom Corporation
+ */
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+#include <linux/bitops.h>
+#include <linux/linkage.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+
+#include <linux/atomic.h>
+#include <asm/smp-ops.h>
+
+extern int smp_num_siblings;
+extern cpumask_t cpu_sibling_map[];
+extern cpumask_t cpu_core_map[];
+extern cpumask_t cpu_foreign_map[];
+
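+/*
+ * The VDSO runs in user space and has no valid current_thread_info(), so
+ * the __VDSO__ build below declares a deliberately uncallable helper: any
+ * use of smp_processor_id() there fails at compile time via
+ * __compiletime_error() instead of reading bogus data.
+ */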
+static inline int raw_smp_processor_id(void)
+{
+#if defined(__VDSO__)
+ extern int vdso_smp_processor_id(void)
+ __compiletime_error("VDSO should not call smp_processor_id()");
+ return vdso_smp_processor_id();
+#else
+ return current_thread_info()->cpu;
+#endif
+}
+#define raw_smp_processor_id raw_smp_processor_id
+
+/* Map from cpu id to sequential logical cpu number. This will only
+   differ from the identity mapping when cpus failed to come on-line. */
+extern int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];
+#define cpu_number_map(cpu) __cpu_number_map[cpu]
+
+/* The reverse map from sequential logical cpu number to cpu id. */
+extern int __cpu_logical_map[NR_CPUS];
+#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
+
+#define NO_PROC_ID (-1)
+
+#define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */
+#define SMP_CALL_FUNCTION 0x2
+/* Octeon - Tell another core to flush its icache */
+#define SMP_ICACHE_FLUSH 0x4
+#define SMP_ASK_C0COUNT 0x8
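+
+/*
+ * The message types above are the 'action' values handed to the platform's
+ * mp_ops->send_ipi_single()/send_ipi_mask() hooks; see smp_send_reschedule()
+ * and the arch_send_call_function_*() helpers below.
+ */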
+
+/* Mask of CPUs which are currently definitely operating coherently */
+extern cpumask_t cpu_coherent_mask;
+
+extern asmlinkage void smp_bootstrap(void);
+
+extern void calculate_cpu_foreign_map(void);
+
+/*
+ * This function sends a 'reschedule' IPI to another CPU.
+ * It goes straight through and wastes no time serializing
+ * anything. Worst case is that we lose a reschedule ...
+ */
+static inline void smp_send_reschedule(int cpu)
+{
+ extern const struct plat_smp_ops *mp_ops; /* private */
+
+ mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static inline int __cpu_disable(void)
+{
+ extern const struct plat_smp_ops *mp_ops; /* private */
+
+ return mp_ops->cpu_disable();
+}
+
+static inline void __cpu_die(unsigned int cpu)
+{
+ extern const struct plat_smp_ops *mp_ops; /* private */
+
+ mp_ops->cpu_die(cpu);
+}
+
+extern void play_dead(void);
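+
+/*
+ * __cpu_disable()/__cpu_die() above simply forward to the platform's
+ * plat_smp_ops; play_dead() is typically what the CPU being offlined ends up
+ * executing from its idle loop once it has been torn down.
+ */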
+#endif
+
+/*
+ * This function will set up the necessary IPIs for Linux to communicate
+ * with the CPUs in mask.
+ * Return 0 on success.
+ */
+int mips_smp_ipi_allocate(const struct cpumask *mask);
+
+/*
+ * This function will free up IPIs allocated with mips_smp_ipi_allocate to the
+ * CPUs in mask, which must be a subset of the IPIs that have been configured.
+ * Return 0 on success.
+ */
+int mips_smp_ipi_free(const struct cpumask *mask);
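+
+/*
+ * Illustrative (hypothetical) pairing of the two helpers above, e.g. around
+ * bringing a CPU's IPIs up and tearing them down again:
+ *
+ *	if (mips_smp_ipi_allocate(cpumask_of(cpu)))
+ *		pr_warn("could not allocate IPIs for CPU%d\n", cpu);
+ *	...
+ *	mips_smp_ipi_free(cpumask_of(cpu));
+ */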
+
+static inline void arch_send_call_function_single_ipi(int cpu)
+{
+ extern const struct plat_smp_ops *mp_ops; /* private */
+
+ mp_ops->send_ipi_mask(cpumask_of(cpu), SMP_CALL_FUNCTION);
+}
+
+static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ extern const struct plat_smp_ops *mp_ops; /* private */
+
+ mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
+}
+
+#endif /* __ASM_SMP_H */