author      Daniel Baumann <daniel.baumann@progress-linux.org>   2024-08-05 19:20:54 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>   2024-08-05 19:20:54 +0000
commit      9cf27fc03a2eaf3d91167db54c19bad8652ac085 (patch)
tree        42f079ff82e701ebcb76829974b4caca3e5b6798 /arch/arc/include/asm
parent      Adding upstream version 6.8.12. (diff)
Adding upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/arc/include/asm')
-rw-r--r--  arch/arc/include/asm/dsp.h                |  2 +-
-rw-r--r--  arch/arc/include/asm/entry-compact.h      | 10 +++++-----
-rw-r--r--  arch/arc/include/asm/entry.h              |  4 ++--
-rw-r--r--  arch/arc/include/asm/irq.h                |  2 +-
-rw-r--r--  arch/arc/include/asm/irqflags-compact.h   |  2 +-
-rw-r--r--  arch/arc/include/asm/mmu_context.h        |  2 +-
-rw-r--r--  arch/arc/include/asm/pgtable-bits-arcv2.h |  2 +-
-rw-r--r--  arch/arc/include/asm/ptrace.h             |  2 +-
-rw-r--r--  arch/arc/include/asm/shmparam.h           |  2 +-
-rw-r--r--  arch/arc/include/asm/smp.h                |  4 ++--
-rw-r--r--  arch/arc/include/asm/thread_info.h        |  2 +-
11 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/arch/arc/include/asm/dsp.h b/arch/arc/include/asm/dsp.h
index 202c78e567..f496dbc464 100644
--- a/arch/arc/include/asm/dsp.h
+++ b/arch/arc/include/asm/dsp.h
@@ -12,7 +12,7 @@
/*
* DSP-related saved registers - need to be saved only when you are
* scheduled out.
- * structure fields name must correspond to aux register defenitions for
+ * structure fields name must correspond to aux register definitions for
* automatic offset calculation in DSP_AUX_SAVE_RESTORE macros
*/
struct dsp_callee_regs {
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index 92c3e9f132..00946fe04c 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -7,7 +7,7 @@
* Stack switching code can no longer reliably rely on the fact that
* if we are NOT in user mode, stack is switched to kernel mode.
* e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
- * it's prologue including stack switching from user mode
+ * its prologue including stack switching from user mode
*
* Vineetg: Aug 28th 2008: Bug #94984
* -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
@@ -143,7 +143,7 @@
* 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
* 3. But before it could switch SP from USER to KERNEL stack
* a L2 IRQ "Interrupts" L1
- * Thay way although L2 IRQ happened in Kernel mode, stack is still
+ * That way although L2 IRQ happened in Kernel mode, stack is still
* not switched.
* To handle this, we may need to switch stack even if in kernel mode
* provided SP has values in range of USER mode stack ( < 0x7000_0000 )
@@ -173,7 +173,7 @@
GET_CURR_TASK_ON_CPU r9
- /* With current tsk in r9, get it's kernel mode stack base */
+ /* With current tsk in r9, get its kernel mode stack base */
GET_TSK_STACK_BASE r9, r9
/* save U mode SP @ pt_regs->sp */
@@ -282,7 +282,7 @@
* NOTE:
*
* It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
- * for memory load operations. If used in that way interrupts are deffered
+ * for memory load operations. If used in that way interrupts are deferred
* by hardware and that is not good.
*-------------------------------------------------------------*/
.macro EXCEPTION_EPILOGUE
@@ -350,7 +350,7 @@
* NOTE:
*
* It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
- * for memory load operations. If used in that way interrupts are deffered
+ * for memory load operations. If used in that way interrupts are deferred
* by hardware and that is not good.
*-------------------------------------------------------------*/
.macro INTERRUPT_EPILOGUE LVL
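
[Editor's note] The @@ -143 hunk above walks through the corner case this file guards against: an L2 IRQ preempts an L1 ISR before the L1 prologue has moved SP off the user stack, so the entry code must switch stacks even in kernel mode whenever SP still lies in the user range (< 0x7000_0000). A minimal C rendering of that test, with a hypothetical helper name (the real logic is in this header's assembly macros):

    /* Hypothetical helper mirroring the range check described above;
     * 0x70000000 is the user-stack ceiling quoted in the comment. */
    static inline int sp_still_on_user_stack(unsigned long sp)
    {
            return sp < 0x70000000UL;
    }
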
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index cf1ba376e9..38c35722ce 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -7,7 +7,7 @@
#ifndef __ASM_ARC_ENTRY_H
#define __ASM_ARC_ENTRY_H
-#include <asm/unistd.h> /* For NR_syscalls defination */
+#include <asm/unistd.h> /* For NR_syscalls definition */
#include <asm/arcregs.h>
#include <asm/ptrace.h>
#include <asm/processor.h> /* For VMALLOC_START */
@@ -56,7 +56,7 @@
.endm
/*-------------------------------------------------------------
- * given a tsk struct, get to the base of it's kernel mode stack
+ * given a tsk struct, get to the base of its kernel mode stack
* tsk->thread_info is really a PAGE, whose bottom hoists stack
* which grows upwards towards thread_info
*------------------------------------------------------------*/
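
[Editor's note] The GET_TSK_STACK_BASE comment above says the task's thread_info occupies one end of the stack page while the kernel-mode stack starts at the other end. An assumed C equivalent of what the assembly macro computes (sketch only, not the actual macro; includes are approximate):

    #include <linux/sched.h>
    #include <linux/sched/task_stack.h>   /* task_thread_info() */
    #include <linux/thread_info.h>        /* THREAD_SIZE */

    /* Sketch: the kernel stack base is the far end of the thread_info pages. */
    static inline unsigned long tsk_kernel_stack_base(struct task_struct *tsk)
    {
            return (unsigned long)task_thread_info(tsk) + THREAD_SIZE;
    }
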
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index c574712ad8..9cd79263ac 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -10,7 +10,7 @@
* ARCv2 can support 240 interrupts in the core interrupts controllers and
* 128 interrupts in IDU. Thus 512 virtual IRQs must be enough for most
* configurations of boards.
- * This doesnt affect ARCompact, but we change it to same value
+ * This doesn't affect ARCompact, but we change it to same value
*/
#define NR_IRQS 512
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index 0d63e568d6..936a2f21f3 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -46,7 +46,7 @@
* IRQ Control Macros
*
* All of them have "memory" clobber (compiler barrier) which is needed to
- * ensure that LD/ST requiring irq safetly (R-M-W when LLSC is not available)
+ * ensure that LD/ST requiring irq safety (R-M-W when LLSC is not available)
* are redone after IRQs are re-enabled (and gcc doesn't reuse stale register)
*
* Noted at the time of Abilis Timer List corruption
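
[Editor's note] The comment above concerns the "memory" clobber on the IRQ-control inline assembly: without it the compiler may keep a value cached in a register across the IRQ-off window and replay a stale read-modify-write once interrupts are back on. A minimal sketch of the pattern for the compact ISA (the STATUS32 bit masks are assumptions, not taken from this hunk):

    /* Sketch of an IRQ-disable helper: the "memory" clobber is the
     * compiler barrier the comment above refers to, forcing loads and
     * stores to be redone around the IRQ-off region. */
    static inline void irq_disable_sketch(void)
    {
            unsigned long temp;

            __asm__ __volatile__(
            "       lr      %0, [status32]  \n"
            "       and     %0, %0, %1      \n"
            "       flag    %0              \n"
            : "=&r"(temp)
            : "n"(~(0x02 | 0x04))   /* assumed E1/E2 interrupt-enable bits */
            : "memory");            /* compiler barrier */
    }
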
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index dda471f5f0..9963bb1a57 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -165,7 +165,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* for retiring-mm. However destroy_context( ) still needs to do that because
* between mm_release( ) = >deactive_mm( ) and
* mmput => .. => __mmdrop( ) => destroy_context( )
- * there is a good chance that task gets sched-out/in, making it's ASID valid
+ * there is a good chance that task gets sched-out/in, making its ASID valid
* again (this teased me for a whole day).
*/
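
[Editor's note] The comment above explains why destroy_context() must still invalidate the mm's ASID: between deactivate_mm() and the eventual __mmdrop() the task can be scheduled out and back in, which would make its ASID valid again. A simplified sketch of what that invalidation amounts to (the context field layout and the "no ASID" value are assumptions for illustration):

    #include <linux/mm_types.h>   /* struct mm_struct */
    #include <linux/smp.h>        /* smp_processor_id() */
    #include <linux/irqflags.h>   /* local_irq_save/restore */

    /* Simplified sketch: mark the dying mm's ASID as unallocated with IRQs
     * off so a later context switch cannot briefly re-validate it. */
    static inline void destroy_context_sketch(struct mm_struct *mm)
    {
            unsigned long flags;

            local_irq_save(flags);
            mm->context.asid[smp_processor_id()] = 0;  /* 0 == "no ASID" here */
            local_irq_restore(flags);
    }
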
diff --git a/arch/arc/include/asm/pgtable-bits-arcv2.h b/arch/arc/include/asm/pgtable-bits-arcv2.h
index f3eea3f30b..8ebec1b21d 100644
--- a/arch/arc/include/asm/pgtable-bits-arcv2.h
+++ b/arch/arc/include/asm/pgtable-bits-arcv2.h
@@ -66,7 +66,7 @@
* Other rules which cause the divergence from 1:1 mapping
*
* 1. Although ARC700 can do exclusive execute/write protection (meaning R
- * can be tracked independet of X/W unlike some other CPUs), still to
+ * can be tracked independently of X/W unlike some other CPUs), still to
* keep things consistent with other archs:
* -Write implies Read: W => R
* -Execute implies Read: X => R
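
[Editor's note] The hunk above restates the divergence rules kept for consistency with other architectures: Write implies Read and Execute implies Read, even though ARC700 could track them independently. A hypothetical helper showing how such a rule can be folded into a protection word, assuming the _PAGE_* bit names defined elsewhere in this header:

    /* Hypothetical: apply the W => R and X => R rules to a protection word. */
    static inline unsigned long prot_apply_rwx_rules(unsigned long prot)
    {
            if (prot & (_PAGE_WRITE | _PAGE_EXECUTE))
                    prot |= _PAGE_READ;     /* W => R and X => R */
            return prot;
    }
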
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 00b9318e55..cf79df0b25 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -169,7 +169,7 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
return *(unsigned long *)((unsigned long)regs + offset);
}
-extern int syscall_trace_entry(struct pt_regs *);
+extern int syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_exit(struct pt_regs *);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arc/include/asm/shmparam.h b/arch/arc/include/asm/shmparam.h
index 8b0251464f..719112af0f 100644
--- a/arch/arc/include/asm/shmparam.h
+++ b/arch/arc/include/asm/shmparam.h
@@ -6,7 +6,7 @@
#ifndef __ARC_ASM_SHMPARAM_H
#define __ARC_ASM_SHMPARAM_H
-/* Handle upto 2 cache bins */
+/* Handle up to 2 cache bins */
#define SHMLBA (2 * PAGE_SIZE)
/* Enforce SHMLBA in shmat */
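
[Editor's note] With two cache bins, SHMLBA above comes out to 2 * PAGE_SIZE, and the header also forces that alignment for shmat(). A small illustrative check of what such enforcement means for an attach address (sketch, not the kernel's actual validation path):

    #include <asm/shmparam.h>   /* SHMLBA */

    /* Illustrative: a shared-memory attach address must be SHMLBA-aligned
     * so cache aliases of the same page land in the same bin. */
    static inline int shm_addr_aligned(unsigned long addr)
    {
            return (addr & (SHMLBA - 1)) == 0;
    }
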
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index e0913f52c2..990f834909 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -77,7 +77,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
/*
* ARC700 doesn't support atomic Read-Modify-Write ops.
- * Originally Interrupts had to be disabled around code to gaurantee atomicity.
+ * Originally Interrupts had to be disabled around code to guarantee atomicity.
* The LLOCK/SCOND insns allow writing interrupt-hassle-free based atomic ops
* based on retry-if-irq-in-atomic (with hardware assist).
* However despite these, we provide the IRQ disabling variant
@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
* support needed.
*
* (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
- * gaurantted by the platform (not something which core handles).
+ * guaranteed by the platform (not something which core handles).
* Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
* disabling for atomicity.
*
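
[Editor's note] The comment above contrasts plain IRQ disabling with LLOCK/SCOND: the load-locked/store-conditional pair simply retries the read-modify-write until the conditional store succeeds. A sketch of that retry loop for an atomic add, following the pattern described rather than copying this header:

    /* Sketch of the LLOCK/SCOND retry-until-success pattern for atomic add. */
    static inline void atomic_add_llsc_sketch(int i, int *counter)
    {
            unsigned int val;

            __asm__ __volatile__(
            "1:     llock   %0, [%1]        \n"     /* load-locked */
            "       add     %0, %0, %2      \n"
            "       scond   %0, [%1]        \n"     /* conditional store */
            "       bnz     1b              \n"     /* retry if it failed */
            : "=&r"(val)
            : "r"(counter), "ir"(i)
            : "cc", "memory");
    }
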
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 4c530cf131..12daaf3a61 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -38,7 +38,7 @@
struct thread_info {
unsigned long flags; /* low level flags */
unsigned long ksp; /* kernel mode stack top in __switch_to */
- int preempt_count; /* 0 => preemptable, <0 => BUG */
+ int preempt_count; /* 0 => preemptible, <0 => BUG */
int cpu; /* current CPU */
unsigned long thr_ptr; /* TLS ptr */
struct task_struct *task; /* main task structure */