From ace9429bb58fd418f0c81d4c2835699bddf6bde6 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Thu, 11 Apr 2024 10:27:49 +0200
Subject: Adding upstream version 6.6.15.

Signed-off-by: Daniel Baumann
---
 arch/powerpc/platforms/cell/spufs/.gitignore       |    3 +
 arch/powerpc/platforms/cell/spufs/Makefile         |   63 +
 arch/powerpc/platforms/cell/spufs/backing_ops.c    |  400 +++
 arch/powerpc/platforms/cell/spufs/context.c        |  175 ++
 arch/powerpc/platforms/cell/spufs/coredump.c       |  182 ++
 arch/powerpc/platforms/cell/spufs/fault.c          |  167 ++
 arch/powerpc/platforms/cell/spufs/file.c           | 2633 ++++++++++++++++++++
 arch/powerpc/platforms/cell/spufs/gang.c           |   74 +
 arch/powerpc/platforms/cell/spufs/hw_ops.c         |  335 +++
 arch/powerpc/platforms/cell/spufs/inode.c          |  826 ++++++
 arch/powerpc/platforms/cell/spufs/lscsa_alloc.c    |   50 +
 arch/powerpc/platforms/cell/spufs/run.c            |  451 ++++
 arch/powerpc/platforms/cell/spufs/sched.c          | 1141 +++++++++
 arch/powerpc/platforms/cell/spufs/spu_restore.c    |  322 +++
 .../platforms/cell/spufs/spu_restore_crt0.S        |  102 +
 .../cell/spufs/spu_restore_dump.h_shipped          |  935 +++++++
 arch/powerpc/platforms/cell/spufs/spu_save.c       |  181 ++
 arch/powerpc/platforms/cell/spufs/spu_save_crt0.S  |   88 +
 .../platforms/cell/spufs/spu_save_dump.h_shipped   |  743 ++++++
 arch/powerpc/platforms/cell/spufs/spu_utils.h      |  147 ++
 arch/powerpc/platforms/cell/spufs/spufs.h          |  356 +++
 arch/powerpc/platforms/cell/spufs/sputrace.h       |   41 +
 arch/powerpc/platforms/cell/spufs/switch.c         | 2206 ++++++++++++++++
 arch/powerpc/platforms/cell/spufs/syscalls.c       |   89 +
 24 files changed, 11710 insertions(+)
 create mode 100644 arch/powerpc/platforms/cell/spufs/.gitignore
 create mode 100644 arch/powerpc/platforms/cell/spufs/Makefile
 create mode 100644 arch/powerpc/platforms/cell/spufs/backing_ops.c
 create mode 100644 arch/powerpc/platforms/cell/spufs/context.c
 create mode 100644 arch/powerpc/platforms/cell/spufs/coredump.c
 create mode 100644 arch/powerpc/platforms/cell/spufs/fault.c
 create mode 100644 arch/powerpc/platforms/cell/spufs/file.c
 create mode 100644 arch/powerpc/platforms/cell/spufs/gang.c
 create mode 100644 arch/powerpc/platforms/cell/spufs/hw_ops.c
 create mode 100644 arch/powerpc/platforms/cell/spufs/inode.c
 create mode 100644 arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
 create mode 100644 arch/powerpc/platforms/cell/spufs/run.c
 create mode 100644 arch/powerpc/platforms/cell/spufs/sched.c
 create mode 100644 arch/powerpc/platforms/cell/spufs/spu_restore.c
 create mode 100644 arch/powerpc/platforms/cell/spufs/spu_restore_crt0.S
 create mode 100644 arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped
 create mode 100644 arch/powerpc/platforms/cell/spufs/spu_save.c
 create mode 100644 arch/powerpc/platforms/cell/spufs/spu_save_crt0.S
 create mode 100644 arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped
 create mode 100644 arch/powerpc/platforms/cell/spufs/spu_utils.h
 create mode 100644 arch/powerpc/platforms/cell/spufs/spufs.h
 create mode 100644 arch/powerpc/platforms/cell/spufs/sputrace.h
 create mode 100644 arch/powerpc/platforms/cell/spufs/switch.c
 create mode 100644 arch/powerpc/platforms/cell/spufs/syscalls.c

(limited to 'arch/powerpc/platforms/cell/spufs')

diff --git a/arch/powerpc/platforms/cell/spufs/.gitignore b/arch/powerpc/platforms/cell/spufs/.gitignore
new file mode 100644
index 0000000000..5f3eb224f6
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+spu_save_dump.h
+spu_restore_dump.h
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
new file mode 100644
index 0000000000..52e4c80ec8
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SPU_FS) += spufs.o
+spufs-y += inode.o file.o context.o syscalls.o
+spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
+spufs-y += switch.o fault.o lscsa_alloc.o
+spufs-$(CONFIG_COREDUMP) += coredump.o
+
+# magic for the trace events
+CFLAGS_sched.o := -I$(src)
+
+# Rules to build switch.o with the help of SPU tool chain
+SPU_CROSS := spu-
+SPU_CC := $(SPU_CROSS)gcc
+SPU_AS := $(SPU_CROSS)gcc
+SPU_LD := $(SPU_CROSS)ld
+SPU_OBJCOPY := $(SPU_CROSS)objcopy
+SPU_CFLAGS := -O2 -Wall -I$(srctree)/include -D__KERNEL__
+SPU_AFLAGS := -c -D__ASSEMBLY__ -I$(srctree)/include -D__KERNEL__
+SPU_LDFLAGS := -N -Ttext=0x0
+
+$(obj)/switch.o: $(obj)/spu_save_dump.h $(obj)/spu_restore_dump.h
+clean-files := spu_save_dump.h spu_restore_dump.h
+
+# Compile SPU files
+      cmd_spu_cc = $(SPU_CC) $(SPU_CFLAGS) -c -o $@ $<
+quiet_cmd_spu_cc = SPU_CC $@
+$(obj)/spu_%.o: $(src)/spu_%.c
+	$(call if_changed,spu_cc)
+
+# Assemble SPU files
+      cmd_spu_as = $(SPU_AS) $(SPU_AFLAGS) -o $@ $<
+quiet_cmd_spu_as = SPU_AS $@
+$(obj)/spu_%.o: $(src)/spu_%.S
+	$(call if_changed,spu_as)
+
+# Link SPU Executables
+      cmd_spu_ld = $(SPU_LD) $(SPU_LDFLAGS) -o $@ $^
+quiet_cmd_spu_ld = SPU_LD $@
+$(obj)/spu_%: $(obj)/spu_%_crt0.o $(obj)/spu_%.o
+	$(call if_changed,spu_ld)
+
+# Copy into binary format
+      cmd_spu_objcopy = $(SPU_OBJCOPY) -O binary $< $@
+quiet_cmd_spu_objcopy = OBJCOPY $@
+$(obj)/spu_%.bin: $(src)/spu_%
+	$(call if_changed,spu_objcopy)
+
+# create C code from ELF executable
+cmd_hexdump = ( \
+		echo "/*" ; \
+		echo " * $*_dump.h: Copyright (C) 2005 IBM." ; \
+		echo " * Hex-dump auto generated from $*.c." ; \
+		echo " * Do not edit!" ; \
+		echo " */" ; \
+		echo "static unsigned int $*_code[] " \
+			"__attribute__((__aligned__(128))) = {" ; \
+		hexdump -v -e '"0x" 4/1 "%02x" "," "\n"' $< ; \
+		echo "};" ; \
+	) > $@
+quiet_cmd_hexdump = HEXDUMP $@
+$(obj)/%_dump.h: $(obj)/%.bin
+	$(call if_changed,hexdump)
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
new file mode 100644
index 0000000000..28a34a2caa
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* backing_ops.c - query/set operations on saved SPU context.
+ *
+ * Copyright (C) IBM 2005
+ * Author: Mark Nutter
+ *
+ * These register operations allow SPUFS to operate on saved
+ * SPU contexts rather than hardware.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include "spufs.h"
+
+/*
+ * Reads/writes to various problem and priv2 registers require
+ * state changes, i.e. generate SPU events, modify channel
+ * counts, etc.
+ */ + +static void gen_spu_event(struct spu_context *ctx, u32 event) +{ + u64 ch0_cnt; + u64 ch0_data; + u64 ch1_data; + + ch0_cnt = ctx->csa.spu_chnlcnt_RW[0]; + ch0_data = ctx->csa.spu_chnldata_RW[0]; + ch1_data = ctx->csa.spu_chnldata_RW[1]; + ctx->csa.spu_chnldata_RW[0] |= event; + if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event)) { + ctx->csa.spu_chnlcnt_RW[0] = 1; + } +} + +static int spu_backing_mbox_read(struct spu_context *ctx, u32 * data) +{ + u32 mbox_stat; + int ret = 0; + + spin_lock(&ctx->csa.register_lock); + mbox_stat = ctx->csa.prob.mb_stat_R; + if (mbox_stat & 0x0000ff) { + /* Read the first available word. + * Implementation note: the depth + * of pu_mb_R is currently 1. + */ + *data = ctx->csa.prob.pu_mb_R; + ctx->csa.prob.mb_stat_R &= ~(0x0000ff); + ctx->csa.spu_chnlcnt_RW[28] = 1; + gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT); + ret = 4; + } + spin_unlock(&ctx->csa.register_lock); + return ret; +} + +static u32 spu_backing_mbox_stat_read(struct spu_context *ctx) +{ + return ctx->csa.prob.mb_stat_R; +} + +static __poll_t spu_backing_mbox_stat_poll(struct spu_context *ctx, + __poll_t events) +{ + __poll_t ret; + u32 stat; + + ret = 0; + spin_lock_irq(&ctx->csa.register_lock); + stat = ctx->csa.prob.mb_stat_R; + + /* if the requested event is there, return the poll + mask, otherwise enable the interrupt to get notified, + but first mark any pending interrupts as done so + we don't get woken up unnecessarily */ + + if (events & (EPOLLIN | EPOLLRDNORM)) { + if (stat & 0xff0000) + ret |= EPOLLIN | EPOLLRDNORM; + else { + ctx->csa.priv1.int_stat_class2_RW &= + ~CLASS2_MAILBOX_INTR; + ctx->csa.priv1.int_mask_class2_RW |= + CLASS2_ENABLE_MAILBOX_INTR; + } + } + if (events & (EPOLLOUT | EPOLLWRNORM)) { + if (stat & 0x00ff00) + ret = EPOLLOUT | EPOLLWRNORM; + else { + ctx->csa.priv1.int_stat_class2_RW &= + ~CLASS2_MAILBOX_THRESHOLD_INTR; + ctx->csa.priv1.int_mask_class2_RW |= + CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR; + } + } + spin_unlock_irq(&ctx->csa.register_lock); + return ret; +} + +static int spu_backing_ibox_read(struct spu_context *ctx, u32 * data) +{ + int ret; + + spin_lock(&ctx->csa.register_lock); + if (ctx->csa.prob.mb_stat_R & 0xff0000) { + /* Read the first available word. + * Implementation note: the depth + * of puint_mb_R is currently 1. + */ + *data = ctx->csa.priv2.puint_mb_R; + ctx->csa.prob.mb_stat_R &= ~(0xff0000); + ctx->csa.spu_chnlcnt_RW[30] = 1; + gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT); + ret = 4; + } else { + /* make sure we get woken up by the interrupt */ + ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_INTR; + ret = 0; + } + spin_unlock(&ctx->csa.register_lock); + return ret; +} + +static int spu_backing_wbox_write(struct spu_context *ctx, u32 data) +{ + int ret; + + spin_lock(&ctx->csa.register_lock); + if ((ctx->csa.prob.mb_stat_R) & 0x00ff00) { + int slot = ctx->csa.spu_chnlcnt_RW[29]; + int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8; + + /* We have space to write wbox_data. + * Implementation note: the depth + * of spu_mb_W is currently 4. 
+ */ + BUG_ON(avail != (4 - slot)); + ctx->csa.spu_mailbox_data[slot] = data; + ctx->csa.spu_chnlcnt_RW[29] = ++slot; + ctx->csa.prob.mb_stat_R &= ~(0x00ff00); + ctx->csa.prob.mb_stat_R |= (((4 - slot) & 0xff) << 8); + gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT); + ret = 4; + } else { + /* make sure we get woken up by the interrupt when space + becomes available */ + ctx->csa.priv1.int_mask_class2_RW |= + CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR; + ret = 0; + } + spin_unlock(&ctx->csa.register_lock); + return ret; +} + +static u32 spu_backing_signal1_read(struct spu_context *ctx) +{ + return ctx->csa.spu_chnldata_RW[3]; +} + +static void spu_backing_signal1_write(struct spu_context *ctx, u32 data) +{ + spin_lock(&ctx->csa.register_lock); + if (ctx->csa.priv2.spu_cfg_RW & 0x1) + ctx->csa.spu_chnldata_RW[3] |= data; + else + ctx->csa.spu_chnldata_RW[3] = data; + ctx->csa.spu_chnlcnt_RW[3] = 1; + gen_spu_event(ctx, MFC_SIGNAL_1_EVENT); + spin_unlock(&ctx->csa.register_lock); +} + +static u32 spu_backing_signal2_read(struct spu_context *ctx) +{ + return ctx->csa.spu_chnldata_RW[4]; +} + +static void spu_backing_signal2_write(struct spu_context *ctx, u32 data) +{ + spin_lock(&ctx->csa.register_lock); + if (ctx->csa.priv2.spu_cfg_RW & 0x2) + ctx->csa.spu_chnldata_RW[4] |= data; + else + ctx->csa.spu_chnldata_RW[4] = data; + ctx->csa.spu_chnlcnt_RW[4] = 1; + gen_spu_event(ctx, MFC_SIGNAL_2_EVENT); + spin_unlock(&ctx->csa.register_lock); +} + +static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val) +{ + u64 tmp; + + spin_lock(&ctx->csa.register_lock); + tmp = ctx->csa.priv2.spu_cfg_RW; + if (val) + tmp |= 1; + else + tmp &= ~1; + ctx->csa.priv2.spu_cfg_RW = tmp; + spin_unlock(&ctx->csa.register_lock); +} + +static u64 spu_backing_signal1_type_get(struct spu_context *ctx) +{ + return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0); +} + +static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val) +{ + u64 tmp; + + spin_lock(&ctx->csa.register_lock); + tmp = ctx->csa.priv2.spu_cfg_RW; + if (val) + tmp |= 2; + else + tmp &= ~2; + ctx->csa.priv2.spu_cfg_RW = tmp; + spin_unlock(&ctx->csa.register_lock); +} + +static u64 spu_backing_signal2_type_get(struct spu_context *ctx) +{ + return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0); +} + +static u32 spu_backing_npc_read(struct spu_context *ctx) +{ + return ctx->csa.prob.spu_npc_RW; +} + +static void spu_backing_npc_write(struct spu_context *ctx, u32 val) +{ + ctx->csa.prob.spu_npc_RW = val; +} + +static u32 spu_backing_status_read(struct spu_context *ctx) +{ + return ctx->csa.prob.spu_status_R; +} + +static char *spu_backing_get_ls(struct spu_context *ctx) +{ + return ctx->csa.lscsa->ls; +} + +static void spu_backing_privcntl_write(struct spu_context *ctx, u64 val) +{ + ctx->csa.priv2.spu_privcntl_RW = val; +} + +static u32 spu_backing_runcntl_read(struct spu_context *ctx) +{ + return ctx->csa.prob.spu_runcntl_RW; +} + +static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val) +{ + spin_lock(&ctx->csa.register_lock); + ctx->csa.prob.spu_runcntl_RW = val; + if (val & SPU_RUNCNTL_RUNNABLE) { + ctx->csa.prob.spu_status_R &= + ~SPU_STATUS_STOPPED_BY_STOP & + ~SPU_STATUS_STOPPED_BY_HALT & + ~SPU_STATUS_SINGLE_STEP & + ~SPU_STATUS_INVALID_INSTR & + ~SPU_STATUS_INVALID_CH; + ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING; + } else { + ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING; + } + spin_unlock(&ctx->csa.register_lock); +} + +static void spu_backing_runcntl_stop(struct spu_context *ctx) +{ + 
spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP); +} + +static void spu_backing_master_start(struct spu_context *ctx) +{ + struct spu_state *csa = &ctx->csa; + u64 sr1; + + spin_lock(&csa->register_lock); + sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK; + csa->priv1.mfc_sr1_RW = sr1; + spin_unlock(&csa->register_lock); +} + +static void spu_backing_master_stop(struct spu_context *ctx) +{ + struct spu_state *csa = &ctx->csa; + u64 sr1; + + spin_lock(&csa->register_lock); + sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK; + csa->priv1.mfc_sr1_RW = sr1; + spin_unlock(&csa->register_lock); +} + +static int spu_backing_set_mfc_query(struct spu_context * ctx, u32 mask, + u32 mode) +{ + struct spu_problem_collapsed *prob = &ctx->csa.prob; + int ret; + + spin_lock(&ctx->csa.register_lock); + ret = -EAGAIN; + if (prob->dma_querytype_RW) + goto out; + ret = 0; + /* FIXME: what are the side-effects of this? */ + prob->dma_querymask_RW = mask; + prob->dma_querytype_RW = mode; + /* In the current implementation, the SPU context is always + * acquired in runnable state when new bits are added to the + * mask (tagwait), so it's sufficient just to mask + * dma_tagstatus_R with the 'mask' parameter here. + */ + ctx->csa.prob.dma_tagstatus_R &= mask; +out: + spin_unlock(&ctx->csa.register_lock); + + return ret; +} + +static u32 spu_backing_read_mfc_tagstatus(struct spu_context * ctx) +{ + return ctx->csa.prob.dma_tagstatus_R; +} + +static u32 spu_backing_get_mfc_free_elements(struct spu_context *ctx) +{ + return ctx->csa.prob.dma_qstatus_R; +} + +static int spu_backing_send_mfc_command(struct spu_context *ctx, + struct mfc_dma_command *cmd) +{ + int ret; + + spin_lock(&ctx->csa.register_lock); + ret = -EAGAIN; + /* FIXME: set up priv2->puq */ + spin_unlock(&ctx->csa.register_lock); + + return ret; +} + +static void spu_backing_restart_dma(struct spu_context *ctx) +{ + ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND; +} + +struct spu_context_ops spu_backing_ops = { + .mbox_read = spu_backing_mbox_read, + .mbox_stat_read = spu_backing_mbox_stat_read, + .mbox_stat_poll = spu_backing_mbox_stat_poll, + .ibox_read = spu_backing_ibox_read, + .wbox_write = spu_backing_wbox_write, + .signal1_read = spu_backing_signal1_read, + .signal1_write = spu_backing_signal1_write, + .signal2_read = spu_backing_signal2_read, + .signal2_write = spu_backing_signal2_write, + .signal1_type_set = spu_backing_signal1_type_set, + .signal1_type_get = spu_backing_signal1_type_get, + .signal2_type_set = spu_backing_signal2_type_set, + .signal2_type_get = spu_backing_signal2_type_get, + .npc_read = spu_backing_npc_read, + .npc_write = spu_backing_npc_write, + .status_read = spu_backing_status_read, + .get_ls = spu_backing_get_ls, + .privcntl_write = spu_backing_privcntl_write, + .runcntl_read = spu_backing_runcntl_read, + .runcntl_write = spu_backing_runcntl_write, + .runcntl_stop = spu_backing_runcntl_stop, + .master_start = spu_backing_master_start, + .master_stop = spu_backing_master_stop, + .set_mfc_query = spu_backing_set_mfc_query, + .read_mfc_tagstatus = spu_backing_read_mfc_tagstatus, + .get_mfc_free_elements = spu_backing_get_mfc_free_elements, + .send_mfc_command = spu_backing_send_mfc_command, + .restart_dma = spu_backing_restart_dma, +}; diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c new file mode 100644 index 0000000000..7a39cc414f --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/context.c @@ -0,0 +1,175 @@ +// 
SPDX-License-Identifier: GPL-2.0-or-later +/* + * SPU file system -- SPU context management + * + * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 + * + * Author: Arnd Bergmann + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include "spufs.h" +#include "sputrace.h" + + +atomic_t nr_spu_contexts = ATOMIC_INIT(0); + +struct spu_context *alloc_spu_context(struct spu_gang *gang) +{ + struct spu_context *ctx; + + ctx = kzalloc(sizeof *ctx, GFP_KERNEL); + if (!ctx) + goto out; + /* Binding to physical processor deferred + * until spu_activate(). + */ + if (spu_init_csa(&ctx->csa)) + goto out_free; + spin_lock_init(&ctx->mmio_lock); + mutex_init(&ctx->mapping_lock); + kref_init(&ctx->kref); + mutex_init(&ctx->state_mutex); + mutex_init(&ctx->run_mutex); + init_waitqueue_head(&ctx->ibox_wq); + init_waitqueue_head(&ctx->wbox_wq); + init_waitqueue_head(&ctx->stop_wq); + init_waitqueue_head(&ctx->mfc_wq); + init_waitqueue_head(&ctx->run_wq); + ctx->state = SPU_STATE_SAVED; + ctx->ops = &spu_backing_ops; + ctx->owner = get_task_mm(current); + INIT_LIST_HEAD(&ctx->rq); + INIT_LIST_HEAD(&ctx->aff_list); + if (gang) + spu_gang_add_ctx(gang, ctx); + + __spu_update_sched_info(ctx); + spu_set_timeslice(ctx); + ctx->stats.util_state = SPU_UTIL_IDLE_LOADED; + ctx->stats.tstamp = ktime_get_ns(); + + atomic_inc(&nr_spu_contexts); + goto out; +out_free: + kfree(ctx); + ctx = NULL; +out: + return ctx; +} + +void destroy_spu_context(struct kref *kref) +{ + struct spu_context *ctx; + ctx = container_of(kref, struct spu_context, kref); + spu_context_nospu_trace(destroy_spu_context__enter, ctx); + mutex_lock(&ctx->state_mutex); + spu_deactivate(ctx); + mutex_unlock(&ctx->state_mutex); + spu_fini_csa(&ctx->csa); + if (ctx->gang) + spu_gang_remove_ctx(ctx->gang, ctx); + if (ctx->prof_priv_kref) + kref_put(ctx->prof_priv_kref, ctx->prof_priv_release); + BUG_ON(!list_empty(&ctx->rq)); + atomic_dec(&nr_spu_contexts); + kfree(ctx->switch_log); + kfree(ctx); +} + +struct spu_context * get_spu_context(struct spu_context *ctx) +{ + kref_get(&ctx->kref); + return ctx; +} + +int put_spu_context(struct spu_context *ctx) +{ + return kref_put(&ctx->kref, &destroy_spu_context); +} + +/* give up the mm reference when the context is about to be destroyed */ +void spu_forget(struct spu_context *ctx) +{ + struct mm_struct *mm; + + /* + * This is basically an open-coded spu_acquire_saved, except that + * we don't acquire the state mutex interruptible, and we don't + * want this context to be rescheduled on release. 
+ */ + mutex_lock(&ctx->state_mutex); + if (ctx->state != SPU_STATE_SAVED) + spu_deactivate(ctx); + + mm = ctx->owner; + ctx->owner = NULL; + mmput(mm); + spu_release(ctx); +} + +void spu_unmap_mappings(struct spu_context *ctx) +{ + mutex_lock(&ctx->mapping_lock); + if (ctx->local_store) + unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1); + if (ctx->mfc) + unmap_mapping_range(ctx->mfc, 0, SPUFS_MFC_MAP_SIZE, 1); + if (ctx->cntl) + unmap_mapping_range(ctx->cntl, 0, SPUFS_CNTL_MAP_SIZE, 1); + if (ctx->signal1) + unmap_mapping_range(ctx->signal1, 0, SPUFS_SIGNAL_MAP_SIZE, 1); + if (ctx->signal2) + unmap_mapping_range(ctx->signal2, 0, SPUFS_SIGNAL_MAP_SIZE, 1); + if (ctx->mss) + unmap_mapping_range(ctx->mss, 0, SPUFS_MSS_MAP_SIZE, 1); + if (ctx->psmap) + unmap_mapping_range(ctx->psmap, 0, SPUFS_PS_MAP_SIZE, 1); + mutex_unlock(&ctx->mapping_lock); +} + +/** + * spu_acquire_saved - lock spu contex and make sure it is in saved state + * @ctx: spu contex to lock + */ +int spu_acquire_saved(struct spu_context *ctx) +{ + int ret; + + spu_context_nospu_trace(spu_acquire_saved__enter, ctx); + + ret = spu_acquire(ctx); + if (ret) + return ret; + + if (ctx->state != SPU_STATE_SAVED) { + set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags); + spu_deactivate(ctx); + } + + return 0; +} + +/** + * spu_release_saved - unlock spu context and return it to the runqueue + * @ctx: context to unlock + */ +void spu_release_saved(struct spu_context *ctx) +{ + BUG_ON(ctx->state != SPU_STATE_SAVED); + + if (test_and_clear_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags) && + test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) + spu_activate(ctx, 0); + + spu_release(ctx); +} + diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c new file mode 100644 index 0000000000..1a58761801 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/coredump.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SPU core dump code + * + * (C) Copyright 2006 IBM Corp. + * + * Author: Dwayne Grant McConnell + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "spufs.h" + +static int spufs_ctx_note_size(struct spu_context *ctx, int dfd) +{ + int i, sz, total = 0; + char *name; + char fullname[80]; + + for (i = 0; spufs_coredump_read[i].name != NULL; i++) { + name = spufs_coredump_read[i].name; + sz = spufs_coredump_read[i].size; + + sprintf(fullname, "SPU/%d/%s", dfd, name); + + total += sizeof(struct elf_note); + total += roundup(strlen(fullname) + 1, 4); + total += roundup(sz, 4); + } + + return total; +} + +static int match_context(const void *v, struct file *file, unsigned fd) +{ + struct spu_context *ctx; + if (file->f_op != &spufs_context_fops) + return 0; + ctx = SPUFS_I(file_inode(file))->i_ctx; + if (ctx->flags & SPU_CREATE_NOSCHED) + return 0; + return fd + 1; +} + +/* + * The additional architecture-specific notes for Cell are various + * context files in the spu context. + * + * This function iterates over all open file descriptors and sees + * if they are a directory in spufs. In that case we use spufs + * internal functionality to dump them without needing to actually + * open the files. + */ +/* + * descriptor table is not shared, so files can't change or go away. 
+ */ +static struct spu_context *coredump_next_context(int *fd) +{ + struct spu_context *ctx; + struct file *file; + int n = iterate_fd(current->files, *fd, match_context, NULL); + if (!n) + return NULL; + *fd = n - 1; + + rcu_read_lock(); + file = lookup_fd_rcu(*fd); + ctx = SPUFS_I(file_inode(file))->i_ctx; + get_spu_context(ctx); + rcu_read_unlock(); + + return ctx; +} + +int spufs_coredump_extra_notes_size(void) +{ + struct spu_context *ctx; + int size = 0, rc, fd; + + fd = 0; + while ((ctx = coredump_next_context(&fd)) != NULL) { + rc = spu_acquire_saved(ctx); + if (rc) { + put_spu_context(ctx); + break; + } + + rc = spufs_ctx_note_size(ctx, fd); + spu_release_saved(ctx); + if (rc < 0) { + put_spu_context(ctx); + break; + } + + size += rc; + + /* start searching the next fd next time */ + fd++; + put_spu_context(ctx); + } + + return size; +} + +static int spufs_arch_write_note(struct spu_context *ctx, int i, + struct coredump_params *cprm, int dfd) +{ + size_t sz = spufs_coredump_read[i].size; + char fullname[80]; + struct elf_note en; + int ret; + + sprintf(fullname, "SPU/%d/%s", dfd, spufs_coredump_read[i].name); + en.n_namesz = strlen(fullname) + 1; + en.n_descsz = sz; + en.n_type = NT_SPU; + + if (!dump_emit(cprm, &en, sizeof(en))) + return -EIO; + if (!dump_emit(cprm, fullname, en.n_namesz)) + return -EIO; + if (!dump_align(cprm, 4)) + return -EIO; + + if (spufs_coredump_read[i].dump) { + ret = spufs_coredump_read[i].dump(ctx, cprm); + if (ret < 0) + return ret; + } else { + char buf[32]; + + ret = snprintf(buf, sizeof(buf), "0x%.16llx", + spufs_coredump_read[i].get(ctx)); + if (ret >= sizeof(buf)) + return sizeof(buf); + + /* count trailing the NULL: */ + if (!dump_emit(cprm, buf, ret + 1)) + return -EIO; + } + + dump_skip_to(cprm, roundup(cprm->pos - ret + sz, 4)); + return 0; +} + +int spufs_coredump_extra_notes_write(struct coredump_params *cprm) +{ + struct spu_context *ctx; + int fd, j, rc; + + fd = 0; + while ((ctx = coredump_next_context(&fd)) != NULL) { + rc = spu_acquire_saved(ctx); + if (rc) + return rc; + + for (j = 0; spufs_coredump_read[j].name != NULL; j++) { + rc = spufs_arch_write_note(ctx, j, cprm, fd); + if (rc) { + spu_release_saved(ctx); + return rc; + } + } + + spu_release_saved(ctx); + + /* start searching the next fd next time */ + fd++; + } + + return 0; +} diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c new file mode 100644 index 0000000000..24adbe3c60 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/fault.c @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Low-level SPU handling + * + * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 + * + * Author: Arnd Bergmann + */ +#include +#include + +#include +#include + +#include "spufs.h" + +/** + * Handle an SPE event, depending on context SPU_CREATE_EVENTS_ENABLED flag. + * + * If the context was created with events, we just set the return event. + * Otherwise, send an appropriate signal to the process. 
+ */ +static void spufs_handle_event(struct spu_context *ctx, + unsigned long ea, int type) +{ + if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) { + ctx->event_return |= type; + wake_up_all(&ctx->stop_wq); + return; + } + + switch (type) { + case SPE_EVENT_INVALID_DMA: + force_sig_fault(SIGBUS, BUS_OBJERR, NULL); + break; + case SPE_EVENT_SPE_DATA_STORAGE: + ctx->ops->restart_dma(ctx); + force_sig_fault(SIGSEGV, SEGV_ACCERR, (void __user *)ea); + break; + case SPE_EVENT_DMA_ALIGNMENT: + /* DAR isn't set for an alignment fault :( */ + force_sig_fault(SIGBUS, BUS_ADRALN, NULL); + break; + case SPE_EVENT_SPE_ERROR: + force_sig_fault( + SIGILL, ILL_ILLOPC, + (void __user *)(unsigned long) + ctx->ops->npc_read(ctx) - 4); + break; + } +} + +int spufs_handle_class0(struct spu_context *ctx) +{ + unsigned long stat = ctx->csa.class_0_pending & CLASS0_INTR_MASK; + + if (likely(!stat)) + return 0; + + if (stat & CLASS0_DMA_ALIGNMENT_INTR) + spufs_handle_event(ctx, ctx->csa.class_0_dar, + SPE_EVENT_DMA_ALIGNMENT); + + if (stat & CLASS0_INVALID_DMA_COMMAND_INTR) + spufs_handle_event(ctx, ctx->csa.class_0_dar, + SPE_EVENT_INVALID_DMA); + + if (stat & CLASS0_SPU_ERROR_INTR) + spufs_handle_event(ctx, ctx->csa.class_0_dar, + SPE_EVENT_SPE_ERROR); + + ctx->csa.class_0_pending = 0; + + return -EIO; +} + +/* + * bottom half handler for page faults, we can't do this from + * interrupt context, since we might need to sleep. + * we also need to give up the mutex so we can get scheduled + * out while waiting for the backing store. + * + * TODO: try calling hash_page from the interrupt handler first + * in order to speed up the easy case. + */ +int spufs_handle_class1(struct spu_context *ctx) +{ + u64 ea, dsisr, access; + unsigned long flags; + vm_fault_t flt = 0; + int ret; + + /* + * dar and dsisr get passed from the registers + * to the spu_context, to this function, but not + * back to the spu if it gets scheduled again. + * + * if we don't handle the fault for a saved context + * in time, we can still expect to get the same fault + * the immediately after the context restore. + */ + ea = ctx->csa.class_1_dar; + dsisr = ctx->csa.class_1_dsisr; + + if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))) + return 0; + + spuctx_switch_state(ctx, SPU_UTIL_IOWAIT); + + pr_debug("ctx %p: ea %016llx, dsisr %016llx state %d\n", ctx, ea, + dsisr, ctx->state); + + ctx->stats.hash_flt++; + if (ctx->state == SPU_STATE_RUNNABLE) + ctx->spu->stats.hash_flt++; + + /* we must not hold the lock when entering copro_handle_mm_fault */ + spu_release(ctx); + + access = (_PAGE_PRESENT | _PAGE_READ); + access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_WRITE : 0UL; + local_irq_save(flags); + ret = hash_page(ea, access, 0x300, dsisr); + local_irq_restore(flags); + + /* hashing failed, so try the actual fault handler */ + if (ret) + ret = copro_handle_mm_fault(current->mm, ea, dsisr, &flt); + + /* + * This is nasty: we need the state_mutex for all the bookkeeping even + * if the syscall was interrupted by a signal. ewww. + */ + mutex_lock(&ctx->state_mutex); + + /* + * Clear dsisr under ctxt lock after handling the fault, so that + * time slicing will not preempt the context while the page fault + * handler is running. Context switch code removes mappings. + */ + ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0; + + /* + * If we handled the fault successfully and are in runnable + * state, restart the DMA. + * In case of unhandled error report the problem to user space. 
+ */ + if (!ret) { + if (flt & VM_FAULT_MAJOR) + ctx->stats.maj_flt++; + else + ctx->stats.min_flt++; + if (ctx->state == SPU_STATE_RUNNABLE) { + if (flt & VM_FAULT_MAJOR) + ctx->spu->stats.maj_flt++; + else + ctx->spu->stats.min_flt++; + } + + if (ctx->spu) + ctx->ops->restart_dma(ctx); + } else + spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE); + + spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); + return ret; +} diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c new file mode 100644 index 0000000000..02a8158c46 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -0,0 +1,2633 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SPU file system -- file contents + * + * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 + * + * Author: Arnd Bergmann + */ + +#undef DEBUG + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "spufs.h" +#include "sputrace.h" + +#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000) + +/* Simple attribute files */ +struct spufs_attr { + int (*get)(void *, u64 *); + int (*set)(void *, u64); + char get_buf[24]; /* enough to store a u64 and "\n\0" */ + char set_buf[24]; + void *data; + const char *fmt; /* format for read operation */ + struct mutex mutex; /* protects access to these buffers */ +}; + +static int spufs_attr_open(struct inode *inode, struct file *file, + int (*get)(void *, u64 *), int (*set)(void *, u64), + const char *fmt) +{ + struct spufs_attr *attr; + + attr = kmalloc(sizeof(*attr), GFP_KERNEL); + if (!attr) + return -ENOMEM; + + attr->get = get; + attr->set = set; + attr->data = inode->i_private; + attr->fmt = fmt; + mutex_init(&attr->mutex); + file->private_data = attr; + + return nonseekable_open(inode, file); +} + +static int spufs_attr_release(struct inode *inode, struct file *file) +{ + kfree(file->private_data); + return 0; +} + +static ssize_t spufs_attr_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos) +{ + struct spufs_attr *attr; + size_t size; + ssize_t ret; + + attr = file->private_data; + if (!attr->get) + return -EACCES; + + ret = mutex_lock_interruptible(&attr->mutex); + if (ret) + return ret; + + if (*ppos) { /* continued read */ + size = strlen(attr->get_buf); + } else { /* first read */ + u64 val; + ret = attr->get(attr->data, &val); + if (ret) + goto out; + + size = scnprintf(attr->get_buf, sizeof(attr->get_buf), + attr->fmt, (unsigned long long)val); + } + + ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size); +out: + mutex_unlock(&attr->mutex); + return ret; +} + +static ssize_t spufs_attr_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + struct spufs_attr *attr; + u64 val; + size_t size; + ssize_t ret; + + attr = file->private_data; + if (!attr->set) + return -EACCES; + + ret = mutex_lock_interruptible(&attr->mutex); + if (ret) + return ret; + + ret = -EFAULT; + size = min(sizeof(attr->set_buf) - 1, len); + if (copy_from_user(attr->set_buf, buf, size)) + goto out; + + ret = len; /* claim we got the whole input */ + attr->set_buf[size] = '\0'; + val = simple_strtol(attr->set_buf, NULL, 0); + attr->set(attr->data, val); +out: + mutex_unlock(&attr->mutex); + return ret; +} + +static ssize_t spufs_dump_emit(struct coredump_params *cprm, void *buf, + size_t size) +{ + if (!dump_emit(cprm, buf, size)) + return -EIO; + return size; +} + +#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \ 
+static int __fops ## _open(struct inode *inode, struct file *file) \ +{ \ + __simple_attr_check_format(__fmt, 0ull); \ + return spufs_attr_open(inode, file, __get, __set, __fmt); \ +} \ +static const struct file_operations __fops = { \ + .open = __fops ## _open, \ + .release = spufs_attr_release, \ + .read = spufs_attr_read, \ + .write = spufs_attr_write, \ + .llseek = generic_file_llseek, \ +}; + + +static int +spufs_mem_open(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + mutex_lock(&ctx->mapping_lock); + file->private_data = ctx; + if (!i->i_openers++) + ctx->local_store = inode->i_mapping; + mutex_unlock(&ctx->mapping_lock); + return 0; +} + +static int +spufs_mem_release(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + mutex_lock(&ctx->mapping_lock); + if (!--i->i_openers) + ctx->local_store = NULL; + mutex_unlock(&ctx->mapping_lock); + return 0; +} + +static ssize_t +spufs_mem_dump(struct spu_context *ctx, struct coredump_params *cprm) +{ + return spufs_dump_emit(cprm, ctx->ops->get_ls(ctx), LS_SIZE); +} + +static ssize_t +spufs_mem_read(struct file *file, char __user *buffer, + size_t size, loff_t *pos) +{ + struct spu_context *ctx = file->private_data; + ssize_t ret; + + ret = spu_acquire(ctx); + if (ret) + return ret; + ret = simple_read_from_buffer(buffer, size, pos, ctx->ops->get_ls(ctx), + LS_SIZE); + spu_release(ctx); + + return ret; +} + +static ssize_t +spufs_mem_write(struct file *file, const char __user *buffer, + size_t size, loff_t *ppos) +{ + struct spu_context *ctx = file->private_data; + char *local_store; + loff_t pos = *ppos; + int ret; + + if (pos > LS_SIZE) + return -EFBIG; + + ret = spu_acquire(ctx); + if (ret) + return ret; + + local_store = ctx->ops->get_ls(ctx); + size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size); + spu_release(ctx); + + return size; +} + +static vm_fault_t +spufs_mem_mmap_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct spu_context *ctx = vma->vm_file->private_data; + unsigned long pfn, offset; + vm_fault_t ret; + + offset = vmf->pgoff << PAGE_SHIFT; + if (offset >= LS_SIZE) + return VM_FAULT_SIGBUS; + + pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n", + vmf->address, offset); + + if (spu_acquire(ctx)) + return VM_FAULT_NOPAGE; + + if (ctx->state == SPU_STATE_SAVED) { + vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); + pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset); + } else { + vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot); + pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT; + } + ret = vmf_insert_pfn(vma, vmf->address, pfn); + + spu_release(ctx); + + return ret; +} + +static int spufs_mem_mmap_access(struct vm_area_struct *vma, + unsigned long address, + void *buf, int len, int write) +{ + struct spu_context *ctx = vma->vm_file->private_data; + unsigned long offset = address - vma->vm_start; + char *local_store; + + if (write && !(vma->vm_flags & VM_WRITE)) + return -EACCES; + if (spu_acquire(ctx)) + return -EINTR; + if ((offset + len) > vma->vm_end) + len = vma->vm_end - offset; + local_store = ctx->ops->get_ls(ctx); + if (write) + memcpy_toio(local_store + offset, buf, len); + else + memcpy_fromio(buf, local_store + offset, len); + spu_release(ctx); + return len; +} + +static const struct vm_operations_struct spufs_mem_mmap_vmops = { + .fault = spufs_mem_mmap_fault, + .access 
= spufs_mem_mmap_access, +}; + +static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma) +{ + if (!(vma->vm_flags & VM_SHARED)) + return -EINVAL; + + vm_flags_set(vma, VM_IO | VM_PFNMAP); + vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot); + + vma->vm_ops = &spufs_mem_mmap_vmops; + return 0; +} + +static const struct file_operations spufs_mem_fops = { + .open = spufs_mem_open, + .release = spufs_mem_release, + .read = spufs_mem_read, + .write = spufs_mem_write, + .llseek = generic_file_llseek, + .mmap = spufs_mem_mmap, +}; + +static vm_fault_t spufs_ps_fault(struct vm_fault *vmf, + unsigned long ps_offs, + unsigned long ps_size) +{ + struct spu_context *ctx = vmf->vma->vm_file->private_data; + unsigned long area, offset = vmf->pgoff << PAGE_SHIFT; + int err = 0; + vm_fault_t ret = VM_FAULT_NOPAGE; + + spu_context_nospu_trace(spufs_ps_fault__enter, ctx); + + if (offset >= ps_size) + return VM_FAULT_SIGBUS; + + if (fatal_signal_pending(current)) + return VM_FAULT_SIGBUS; + + /* + * Because we release the mmap_lock, the context may be destroyed while + * we're in spu_wait. Grab an extra reference so it isn't destroyed + * in the meantime. + */ + get_spu_context(ctx); + + /* + * We have to wait for context to be loaded before we have + * pages to hand out to the user, but we don't want to wait + * with the mmap_lock held. + * It is possible to drop the mmap_lock here, but then we need + * to return VM_FAULT_NOPAGE because the mappings may have + * hanged. + */ + if (spu_acquire(ctx)) + goto refault; + + if (ctx->state == SPU_STATE_SAVED) { + mmap_read_unlock(current->mm); + spu_context_nospu_trace(spufs_ps_fault__sleep, ctx); + err = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE); + spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu); + mmap_read_lock(current->mm); + } else { + area = ctx->spu->problem_phys + ps_offs; + ret = vmf_insert_pfn(vmf->vma, vmf->address, + (area + offset) >> PAGE_SHIFT); + spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu); + } + + if (!err) + spu_release(ctx); + +refault: + put_spu_context(ctx); + return ret; +} + +#if SPUFS_MMAP_4K +static vm_fault_t spufs_cntl_mmap_fault(struct vm_fault *vmf) +{ + return spufs_ps_fault(vmf, 0x4000, SPUFS_CNTL_MAP_SIZE); +} + +static const struct vm_operations_struct spufs_cntl_mmap_vmops = { + .fault = spufs_cntl_mmap_fault, +}; + +/* + * mmap support for problem state control area [0x4000 - 0x4fff]. 
+ */ +static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma) +{ + if (!(vma->vm_flags & VM_SHARED)) + return -EINVAL; + + vm_flags_set(vma, VM_IO | VM_PFNMAP); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + vma->vm_ops = &spufs_cntl_mmap_vmops; + return 0; +} +#else /* SPUFS_MMAP_4K */ +#define spufs_cntl_mmap NULL +#endif /* !SPUFS_MMAP_4K */ + +static int spufs_cntl_get(void *data, u64 *val) +{ + struct spu_context *ctx = data; + int ret; + + ret = spu_acquire(ctx); + if (ret) + return ret; + *val = ctx->ops->status_read(ctx); + spu_release(ctx); + + return 0; +} + +static int spufs_cntl_set(void *data, u64 val) +{ + struct spu_context *ctx = data; + int ret; + + ret = spu_acquire(ctx); + if (ret) + return ret; + ctx->ops->runcntl_write(ctx, val); + spu_release(ctx); + + return 0; +} + +static int spufs_cntl_open(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + mutex_lock(&ctx->mapping_lock); + file->private_data = ctx; + if (!i->i_openers++) + ctx->cntl = inode->i_mapping; + mutex_unlock(&ctx->mapping_lock); + return simple_attr_open(inode, file, spufs_cntl_get, + spufs_cntl_set, "0x%08lx"); +} + +static int +spufs_cntl_release(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + simple_attr_release(inode, file); + + mutex_lock(&ctx->mapping_lock); + if (!--i->i_openers) + ctx->cntl = NULL; + mutex_unlock(&ctx->mapping_lock); + return 0; +} + +static const struct file_operations spufs_cntl_fops = { + .open = spufs_cntl_open, + .release = spufs_cntl_release, + .read = simple_attr_read, + .write = simple_attr_write, + .llseek = no_llseek, + .mmap = spufs_cntl_mmap, +}; + +static int +spufs_regs_open(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + file->private_data = i->i_ctx; + return 0; +} + +static ssize_t +spufs_regs_dump(struct spu_context *ctx, struct coredump_params *cprm) +{ + return spufs_dump_emit(cprm, ctx->csa.lscsa->gprs, + sizeof(ctx->csa.lscsa->gprs)); +} + +static ssize_t +spufs_regs_read(struct file *file, char __user *buffer, + size_t size, loff_t *pos) +{ + int ret; + struct spu_context *ctx = file->private_data; + + /* pre-check for file position: if we'd return EOF, there's no point + * causing a deschedule */ + if (*pos >= sizeof(ctx->csa.lscsa->gprs)) + return 0; + + ret = spu_acquire_saved(ctx); + if (ret) + return ret; + ret = simple_read_from_buffer(buffer, size, pos, ctx->csa.lscsa->gprs, + sizeof(ctx->csa.lscsa->gprs)); + spu_release_saved(ctx); + return ret; +} + +static ssize_t +spufs_regs_write(struct file *file, const char __user *buffer, + size_t size, loff_t *pos) +{ + struct spu_context *ctx = file->private_data; + struct spu_lscsa *lscsa = ctx->csa.lscsa; + int ret; + + if (*pos >= sizeof(lscsa->gprs)) + return -EFBIG; + + ret = spu_acquire_saved(ctx); + if (ret) + return ret; + + size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos, + buffer, size); + + spu_release_saved(ctx); + return size; +} + +static const struct file_operations spufs_regs_fops = { + .open = spufs_regs_open, + .read = spufs_regs_read, + .write = spufs_regs_write, + .llseek = generic_file_llseek, +}; + +static ssize_t +spufs_fpcr_dump(struct spu_context *ctx, struct coredump_params *cprm) +{ + return spufs_dump_emit(cprm, &ctx->csa.lscsa->fpcr, + sizeof(ctx->csa.lscsa->fpcr)); +} + +static ssize_t +spufs_fpcr_read(struct file 
*file, char __user * buffer, + size_t size, loff_t * pos) +{ + int ret; + struct spu_context *ctx = file->private_data; + + ret = spu_acquire_saved(ctx); + if (ret) + return ret; + ret = simple_read_from_buffer(buffer, size, pos, &ctx->csa.lscsa->fpcr, + sizeof(ctx->csa.lscsa->fpcr)); + spu_release_saved(ctx); + return ret; +} + +static ssize_t +spufs_fpcr_write(struct file *file, const char __user * buffer, + size_t size, loff_t * pos) +{ + struct spu_context *ctx = file->private_data; + struct spu_lscsa *lscsa = ctx->csa.lscsa; + int ret; + + if (*pos >= sizeof(lscsa->fpcr)) + return -EFBIG; + + ret = spu_acquire_saved(ctx); + if (ret) + return ret; + + size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos, + buffer, size); + + spu_release_saved(ctx); + return size; +} + +static const struct file_operations spufs_fpcr_fops = { + .open = spufs_regs_open, + .read = spufs_fpcr_read, + .write = spufs_fpcr_write, + .llseek = generic_file_llseek, +}; + +/* generic open function for all pipe-like files */ +static int spufs_pipe_open(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + file->private_data = i->i_ctx; + + return stream_open(inode, file); +} + +/* + * Read as many bytes from the mailbox as possible, until + * one of the conditions becomes true: + * + * - no more data available in the mailbox + * - end of the user provided buffer + * - end of the mapped area + */ +static ssize_t spufs_mbox_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) +{ + struct spu_context *ctx = file->private_data; + u32 mbox_data, __user *udata = (void __user *)buf; + ssize_t count; + + if (len < 4) + return -EINVAL; + + count = spu_acquire(ctx); + if (count) + return count; + + for (count = 0; (count + 4) <= len; count += 4, udata++) { + int ret; + ret = ctx->ops->mbox_read(ctx, &mbox_data); + if (ret == 0) + break; + + /* + * at the end of the mapped area, we can fault + * but still need to return the data we have + * read successfully so far. + */ + ret = put_user(mbox_data, udata); + if (ret) { + if (!count) + count = -EFAULT; + break; + } + } + spu_release(ctx); + + if (!count) + count = -EAGAIN; + + return count; +} + +static const struct file_operations spufs_mbox_fops = { + .open = spufs_pipe_open, + .read = spufs_mbox_read, + .llseek = no_llseek, +}; + +static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) +{ + struct spu_context *ctx = file->private_data; + ssize_t ret; + u32 mbox_stat; + + if (len < 4) + return -EINVAL; + + ret = spu_acquire(ctx); + if (ret) + return ret; + + mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff; + + spu_release(ctx); + + if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat)) + return -EFAULT; + + return 4; +} + +static const struct file_operations spufs_mbox_stat_fops = { + .open = spufs_pipe_open, + .read = spufs_mbox_stat_read, + .llseek = no_llseek, +}; + +/* low-level ibox access function */ +size_t spu_ibox_read(struct spu_context *ctx, u32 *data) +{ + return ctx->ops->ibox_read(ctx, data); +} + +/* interrupt-level ibox callback function. 
*/ +void spufs_ibox_callback(struct spu *spu) +{ + struct spu_context *ctx = spu->ctx; + + if (ctx) + wake_up_all(&ctx->ibox_wq); +} + +/* + * Read as many bytes from the interrupt mailbox as possible, until + * one of the conditions becomes true: + * + * - no more data available in the mailbox + * - end of the user provided buffer + * - end of the mapped area + * + * If the file is opened without O_NONBLOCK, we wait here until + * any data is available, but return when we have been able to + * read something. + */ +static ssize_t spufs_ibox_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) +{ + struct spu_context *ctx = file->private_data; + u32 ibox_data, __user *udata = (void __user *)buf; + ssize_t count; + + if (len < 4) + return -EINVAL; + + count = spu_acquire(ctx); + if (count) + goto out; + + /* wait only for the first element */ + count = 0; + if (file->f_flags & O_NONBLOCK) { + if (!spu_ibox_read(ctx, &ibox_data)) { + count = -EAGAIN; + goto out_unlock; + } + } else { + count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data)); + if (count) + goto out; + } + + /* if we can't write at all, return -EFAULT */ + count = put_user(ibox_data, udata); + if (count) + goto out_unlock; + + for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) { + int ret; + ret = ctx->ops->ibox_read(ctx, &ibox_data); + if (ret == 0) + break; + /* + * at the end of the mapped area, we can fault + * but still need to return the data we have + * read successfully so far. + */ + ret = put_user(ibox_data, udata); + if (ret) + break; + } + +out_unlock: + spu_release(ctx); +out: + return count; +} + +static __poll_t spufs_ibox_poll(struct file *file, poll_table *wait) +{ + struct spu_context *ctx = file->private_data; + __poll_t mask; + + poll_wait(file, &ctx->ibox_wq, wait); + + /* + * For now keep this uninterruptible and also ignore the rule + * that poll should not sleep. Will be fixed later. + */ + mutex_lock(&ctx->state_mutex); + mask = ctx->ops->mbox_stat_poll(ctx, EPOLLIN | EPOLLRDNORM); + spu_release(ctx); + + return mask; +} + +static const struct file_operations spufs_ibox_fops = { + .open = spufs_pipe_open, + .read = spufs_ibox_read, + .poll = spufs_ibox_poll, + .llseek = no_llseek, +}; + +static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) +{ + struct spu_context *ctx = file->private_data; + ssize_t ret; + u32 ibox_stat; + + if (len < 4) + return -EINVAL; + + ret = spu_acquire(ctx); + if (ret) + return ret; + ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff; + spu_release(ctx); + + if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat)) + return -EFAULT; + + return 4; +} + +static const struct file_operations spufs_ibox_stat_fops = { + .open = spufs_pipe_open, + .read = spufs_ibox_stat_read, + .llseek = no_llseek, +}; + +/* low-level mailbox write */ +size_t spu_wbox_write(struct spu_context *ctx, u32 data) +{ + return ctx->ops->wbox_write(ctx, data); +} + +/* interrupt-level wbox callback function. */ +void spufs_wbox_callback(struct spu *spu) +{ + struct spu_context *ctx = spu->ctx; + + if (ctx) + wake_up_all(&ctx->wbox_wq); +} + +/* + * Write as many bytes to the interrupt mailbox as possible, until + * one of the conditions becomes true: + * + * - the mailbox is full + * - end of the user provided buffer + * - end of the mapped area + * + * If the file is opened without O_NONBLOCK, we wait here until + * space is available, but return when we have been able to + * write something. 
+ */ +static ssize_t spufs_wbox_write(struct file *file, const char __user *buf, + size_t len, loff_t *pos) +{ + struct spu_context *ctx = file->private_data; + u32 wbox_data, __user *udata = (void __user *)buf; + ssize_t count; + + if (len < 4) + return -EINVAL; + + if (get_user(wbox_data, udata)) + return -EFAULT; + + count = spu_acquire(ctx); + if (count) + goto out; + + /* + * make sure we can at least write one element, by waiting + * in case of !O_NONBLOCK + */ + count = 0; + if (file->f_flags & O_NONBLOCK) { + if (!spu_wbox_write(ctx, wbox_data)) { + count = -EAGAIN; + goto out_unlock; + } + } else { + count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data)); + if (count) + goto out; + } + + + /* write as much as possible */ + for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) { + int ret; + ret = get_user(wbox_data, udata); + if (ret) + break; + + ret = spu_wbox_write(ctx, wbox_data); + if (ret == 0) + break; + } + +out_unlock: + spu_release(ctx); +out: + return count; +} + +static __poll_t spufs_wbox_poll(struct file *file, poll_table *wait) +{ + struct spu_context *ctx = file->private_data; + __poll_t mask; + + poll_wait(file, &ctx->wbox_wq, wait); + + /* + * For now keep this uninterruptible and also ignore the rule + * that poll should not sleep. Will be fixed later. + */ + mutex_lock(&ctx->state_mutex); + mask = ctx->ops->mbox_stat_poll(ctx, EPOLLOUT | EPOLLWRNORM); + spu_release(ctx); + + return mask; +} + +static const struct file_operations spufs_wbox_fops = { + .open = spufs_pipe_open, + .write = spufs_wbox_write, + .poll = spufs_wbox_poll, + .llseek = no_llseek, +}; + +static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) +{ + struct spu_context *ctx = file->private_data; + ssize_t ret; + u32 wbox_stat; + + if (len < 4) + return -EINVAL; + + ret = spu_acquire(ctx); + if (ret) + return ret; + wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff; + spu_release(ctx); + + if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat)) + return -EFAULT; + + return 4; +} + +static const struct file_operations spufs_wbox_stat_fops = { + .open = spufs_pipe_open, + .read = spufs_wbox_stat_read, + .llseek = no_llseek, +}; + +static int spufs_signal1_open(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + mutex_lock(&ctx->mapping_lock); + file->private_data = ctx; + if (!i->i_openers++) + ctx->signal1 = inode->i_mapping; + mutex_unlock(&ctx->mapping_lock); + return nonseekable_open(inode, file); +} + +static int +spufs_signal1_release(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + mutex_lock(&ctx->mapping_lock); + if (!--i->i_openers) + ctx->signal1 = NULL; + mutex_unlock(&ctx->mapping_lock); + return 0; +} + +static ssize_t spufs_signal1_dump(struct spu_context *ctx, + struct coredump_params *cprm) +{ + if (!ctx->csa.spu_chnlcnt_RW[3]) + return 0; + return spufs_dump_emit(cprm, &ctx->csa.spu_chnldata_RW[3], + sizeof(ctx->csa.spu_chnldata_RW[3])); +} + +static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf, + size_t len) +{ + if (len < sizeof(ctx->csa.spu_chnldata_RW[3])) + return -EINVAL; + if (!ctx->csa.spu_chnlcnt_RW[3]) + return 0; + if (copy_to_user(buf, &ctx->csa.spu_chnldata_RW[3], + sizeof(ctx->csa.spu_chnldata_RW[3]))) + return -EFAULT; + return sizeof(ctx->csa.spu_chnldata_RW[3]); +} + +static ssize_t spufs_signal1_read(struct 
file *file, char __user *buf, + size_t len, loff_t *pos) +{ + int ret; + struct spu_context *ctx = file->private_data; + + ret = spu_acquire_saved(ctx); + if (ret) + return ret; + ret = __spufs_signal1_read(ctx, buf, len); + spu_release_saved(ctx); + + return ret; +} + +static ssize_t spufs_signal1_write(struct file *file, const char __user *buf, + size_t len, loff_t *pos) +{ + struct spu_context *ctx; + ssize_t ret; + u32 data; + + ctx = file->private_data; + + if (len < 4) + return -EINVAL; + + if (copy_from_user(&data, buf, 4)) + return -EFAULT; + + ret = spu_acquire(ctx); + if (ret) + return ret; + ctx->ops->signal1_write(ctx, data); + spu_release(ctx); + + return 4; +} + +static vm_fault_t +spufs_signal1_mmap_fault(struct vm_fault *vmf) +{ +#if SPUFS_SIGNAL_MAP_SIZE == 0x1000 + return spufs_ps_fault(vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE); +#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000 + /* For 64k pages, both signal1 and signal2 can be used to mmap the whole + * signal 1 and 2 area + */ + return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE); +#else +#error unsupported page size +#endif +} + +static const struct vm_operations_struct spufs_signal1_mmap_vmops = { + .fault = spufs_signal1_mmap_fault, +}; + +static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma) +{ + if (!(vma->vm_flags & VM_SHARED)) + return -EINVAL; + + vm_flags_set(vma, VM_IO | VM_PFNMAP); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + vma->vm_ops = &spufs_signal1_mmap_vmops; + return 0; +} + +static const struct file_operations spufs_signal1_fops = { + .open = spufs_signal1_open, + .release = spufs_signal1_release, + .read = spufs_signal1_read, + .write = spufs_signal1_write, + .mmap = spufs_signal1_mmap, + .llseek = no_llseek, +}; + +static const struct file_operations spufs_signal1_nosched_fops = { + .open = spufs_signal1_open, + .release = spufs_signal1_release, + .write = spufs_signal1_write, + .mmap = spufs_signal1_mmap, + .llseek = no_llseek, +}; + +static int spufs_signal2_open(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + mutex_lock(&ctx->mapping_lock); + file->private_data = ctx; + if (!i->i_openers++) + ctx->signal2 = inode->i_mapping; + mutex_unlock(&ctx->mapping_lock); + return nonseekable_open(inode, file); +} + +static int +spufs_signal2_release(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + mutex_lock(&ctx->mapping_lock); + if (!--i->i_openers) + ctx->signal2 = NULL; + mutex_unlock(&ctx->mapping_lock); + return 0; +} + +static ssize_t spufs_signal2_dump(struct spu_context *ctx, + struct coredump_params *cprm) +{ + if (!ctx->csa.spu_chnlcnt_RW[4]) + return 0; + return spufs_dump_emit(cprm, &ctx->csa.spu_chnldata_RW[4], + sizeof(ctx->csa.spu_chnldata_RW[4])); +} + +static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf, + size_t len) +{ + if (len < sizeof(ctx->csa.spu_chnldata_RW[4])) + return -EINVAL; + if (!ctx->csa.spu_chnlcnt_RW[4]) + return 0; + if (copy_to_user(buf, &ctx->csa.spu_chnldata_RW[4], + sizeof(ctx->csa.spu_chnldata_RW[4]))) + return -EFAULT; + return sizeof(ctx->csa.spu_chnldata_RW[4]); +} + +static ssize_t spufs_signal2_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) +{ + struct spu_context *ctx = file->private_data; + int ret; + + ret = spu_acquire_saved(ctx); + if (ret) + return ret; + ret = __spufs_signal2_read(ctx, buf, len); + 
spu_release_saved(ctx); + + return ret; +} + +static ssize_t spufs_signal2_write(struct file *file, const char __user *buf, + size_t len, loff_t *pos) +{ + struct spu_context *ctx; + ssize_t ret; + u32 data; + + ctx = file->private_data; + + if (len < 4) + return -EINVAL; + + if (copy_from_user(&data, buf, 4)) + return -EFAULT; + + ret = spu_acquire(ctx); + if (ret) + return ret; + ctx->ops->signal2_write(ctx, data); + spu_release(ctx); + + return 4; +} + +#if SPUFS_MMAP_4K +static vm_fault_t +spufs_signal2_mmap_fault(struct vm_fault *vmf) +{ +#if SPUFS_SIGNAL_MAP_SIZE == 0x1000 + return spufs_ps_fault(vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE); +#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000 + /* For 64k pages, both signal1 and signal2 can be used to mmap the whole + * signal 1 and 2 area + */ + return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE); +#else +#error unsupported page size +#endif +} + +static const struct vm_operations_struct spufs_signal2_mmap_vmops = { + .fault = spufs_signal2_mmap_fault, +}; + +static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma) +{ + if (!(vma->vm_flags & VM_SHARED)) + return -EINVAL; + + vm_flags_set(vma, VM_IO | VM_PFNMAP); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + vma->vm_ops = &spufs_signal2_mmap_vmops; + return 0; +} +#else /* SPUFS_MMAP_4K */ +#define spufs_signal2_mmap NULL +#endif /* !SPUFS_MMAP_4K */ + +static const struct file_operations spufs_signal2_fops = { + .open = spufs_signal2_open, + .release = spufs_signal2_release, + .read = spufs_signal2_read, + .write = spufs_signal2_write, + .mmap = spufs_signal2_mmap, + .llseek = no_llseek, +}; + +static const struct file_operations spufs_signal2_nosched_fops = { + .open = spufs_signal2_open, + .release = spufs_signal2_release, + .write = spufs_signal2_write, + .mmap = spufs_signal2_mmap, + .llseek = no_llseek, +}; + +/* + * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the + * work of acquiring (or not) the SPU context before calling through + * to the actual get routine. The set routine is called directly. 
+ */ +#define SPU_ATTR_NOACQUIRE 0 +#define SPU_ATTR_ACQUIRE 1 +#define SPU_ATTR_ACQUIRE_SAVED 2 + +#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire) \ +static int __##__get(void *data, u64 *val) \ +{ \ + struct spu_context *ctx = data; \ + int ret = 0; \ + \ + if (__acquire == SPU_ATTR_ACQUIRE) { \ + ret = spu_acquire(ctx); \ + if (ret) \ + return ret; \ + *val = __get(ctx); \ + spu_release(ctx); \ + } else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) { \ + ret = spu_acquire_saved(ctx); \ + if (ret) \ + return ret; \ + *val = __get(ctx); \ + spu_release_saved(ctx); \ + } else \ + *val = __get(ctx); \ + \ + return 0; \ +} \ +DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt); + +static int spufs_signal1_type_set(void *data, u64 val) +{ + struct spu_context *ctx = data; + int ret; + + ret = spu_acquire(ctx); + if (ret) + return ret; + ctx->ops->signal1_type_set(ctx, val); + spu_release(ctx); + + return 0; +} + +static u64 spufs_signal1_type_get(struct spu_context *ctx) +{ + return ctx->ops->signal1_type_get(ctx); +} +DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get, + spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE); + + +static int spufs_signal2_type_set(void *data, u64 val) +{ + struct spu_context *ctx = data; + int ret; + + ret = spu_acquire(ctx); + if (ret) + return ret; + ctx->ops->signal2_type_set(ctx, val); + spu_release(ctx); + + return 0; +} + +static u64 spufs_signal2_type_get(struct spu_context *ctx) +{ + return ctx->ops->signal2_type_get(ctx); +} +DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get, + spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE); + +#if SPUFS_MMAP_4K +static vm_fault_t +spufs_mss_mmap_fault(struct vm_fault *vmf) +{ + return spufs_ps_fault(vmf, 0x0000, SPUFS_MSS_MAP_SIZE); +} + +static const struct vm_operations_struct spufs_mss_mmap_vmops = { + .fault = spufs_mss_mmap_fault, +}; + +/* + * mmap support for problem state MFC DMA area [0x0000 - 0x0fff]. + */ +static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma) +{ + if (!(vma->vm_flags & VM_SHARED)) + return -EINVAL; + + vm_flags_set(vma, VM_IO | VM_PFNMAP); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + vma->vm_ops = &spufs_mss_mmap_vmops; + return 0; +} +#else /* SPUFS_MMAP_4K */ +#define spufs_mss_mmap NULL +#endif /* !SPUFS_MMAP_4K */ + +static int spufs_mss_open(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + file->private_data = i->i_ctx; + + mutex_lock(&ctx->mapping_lock); + if (!i->i_openers++) + ctx->mss = inode->i_mapping; + mutex_unlock(&ctx->mapping_lock); + return nonseekable_open(inode, file); +} + +static int +spufs_mss_release(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + mutex_lock(&ctx->mapping_lock); + if (!--i->i_openers) + ctx->mss = NULL; + mutex_unlock(&ctx->mapping_lock); + return 0; +} + +static const struct file_operations spufs_mss_fops = { + .open = spufs_mss_open, + .release = spufs_mss_release, + .mmap = spufs_mss_mmap, + .llseek = no_llseek, +}; + +static vm_fault_t +spufs_psmap_mmap_fault(struct vm_fault *vmf) +{ + return spufs_ps_fault(vmf, 0x0000, SPUFS_PS_MAP_SIZE); +} + +static const struct vm_operations_struct spufs_psmap_mmap_vmops = { + .fault = spufs_psmap_mmap_fault, +}; + +/* + * mmap support for full problem state area [0x00000 - 0x1ffff]. 
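+ *
+ * Sketch of the intended user-space access pattern (the context path
+ * below is made up for illustration):
+ *
+ *	int fd = open("/spu/my_ctx/psmap", O_RDWR);
+ *	void *ps = mmap(NULL, 0x20000, PROT_READ | PROT_WRITE,
+ *			MAP_SHARED, fd, 0);
+ *
+ * The mapping must be MAP_SHARED and is made non-cacheable; individual
+ * pages are resolved on demand by spufs_psmap_mmap_fault() above.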
+ */ +static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma) +{ + if (!(vma->vm_flags & VM_SHARED)) + return -EINVAL; + + vm_flags_set(vma, VM_IO | VM_PFNMAP); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + vma->vm_ops = &spufs_psmap_mmap_vmops; + return 0; +} + +static int spufs_psmap_open(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + mutex_lock(&ctx->mapping_lock); + file->private_data = i->i_ctx; + if (!i->i_openers++) + ctx->psmap = inode->i_mapping; + mutex_unlock(&ctx->mapping_lock); + return nonseekable_open(inode, file); +} + +static int +spufs_psmap_release(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + mutex_lock(&ctx->mapping_lock); + if (!--i->i_openers) + ctx->psmap = NULL; + mutex_unlock(&ctx->mapping_lock); + return 0; +} + +static const struct file_operations spufs_psmap_fops = { + .open = spufs_psmap_open, + .release = spufs_psmap_release, + .mmap = spufs_psmap_mmap, + .llseek = no_llseek, +}; + + +#if SPUFS_MMAP_4K +static vm_fault_t +spufs_mfc_mmap_fault(struct vm_fault *vmf) +{ + return spufs_ps_fault(vmf, 0x3000, SPUFS_MFC_MAP_SIZE); +} + +static const struct vm_operations_struct spufs_mfc_mmap_vmops = { + .fault = spufs_mfc_mmap_fault, +}; + +/* + * mmap support for problem state MFC DMA area [0x0000 - 0x0fff]. + */ +static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma) +{ + if (!(vma->vm_flags & VM_SHARED)) + return -EINVAL; + + vm_flags_set(vma, VM_IO | VM_PFNMAP); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + vma->vm_ops = &spufs_mfc_mmap_vmops; + return 0; +} +#else /* SPUFS_MMAP_4K */ +#define spufs_mfc_mmap NULL +#endif /* !SPUFS_MMAP_4K */ + +static int spufs_mfc_open(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + /* we don't want to deal with DMA into other processes */ + if (ctx->owner != current->mm) + return -EINVAL; + + if (atomic_read(&inode->i_count) != 1) + return -EBUSY; + + mutex_lock(&ctx->mapping_lock); + file->private_data = ctx; + if (!i->i_openers++) + ctx->mfc = inode->i_mapping; + mutex_unlock(&ctx->mapping_lock); + return nonseekable_open(inode, file); +} + +static int +spufs_mfc_release(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + + mutex_lock(&ctx->mapping_lock); + if (!--i->i_openers) + ctx->mfc = NULL; + mutex_unlock(&ctx->mapping_lock); + return 0; +} + +/* interrupt-level mfc callback function. 
*/ +void spufs_mfc_callback(struct spu *spu) +{ + struct spu_context *ctx = spu->ctx; + + if (ctx) + wake_up_all(&ctx->mfc_wq); +} + +static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status) +{ + /* See if there is one tag group is complete */ + /* FIXME we need locking around tagwait */ + *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait; + ctx->tagwait &= ~*status; + if (*status) + return 1; + + /* enable interrupt waiting for any tag group, + may silently fail if interrupts are already enabled */ + ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1); + return 0; +} + +static ssize_t spufs_mfc_read(struct file *file, char __user *buffer, + size_t size, loff_t *pos) +{ + struct spu_context *ctx = file->private_data; + int ret = -EINVAL; + u32 status; + + if (size != 4) + goto out; + + ret = spu_acquire(ctx); + if (ret) + return ret; + + ret = -EINVAL; + if (file->f_flags & O_NONBLOCK) { + status = ctx->ops->read_mfc_tagstatus(ctx); + if (!(status & ctx->tagwait)) + ret = -EAGAIN; + else + /* XXX(hch): shouldn't we clear ret here? */ + ctx->tagwait &= ~status; + } else { + ret = spufs_wait(ctx->mfc_wq, + spufs_read_mfc_tagstatus(ctx, &status)); + if (ret) + goto out; + } + spu_release(ctx); + + ret = 4; + if (copy_to_user(buffer, &status, 4)) + ret = -EFAULT; + +out: + return ret; +} + +static int spufs_check_valid_dma(struct mfc_dma_command *cmd) +{ + pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa, + cmd->ea, cmd->size, cmd->tag, cmd->cmd); + + switch (cmd->cmd) { + case MFC_PUT_CMD: + case MFC_PUTF_CMD: + case MFC_PUTB_CMD: + case MFC_GET_CMD: + case MFC_GETF_CMD: + case MFC_GETB_CMD: + break; + default: + pr_debug("invalid DMA opcode %x\n", cmd->cmd); + return -EIO; + } + + if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) { + pr_debug("invalid DMA alignment, ea %llx lsa %x\n", + cmd->ea, cmd->lsa); + return -EIO; + } + + switch (cmd->size & 0xf) { + case 1: + break; + case 2: + if (cmd->lsa & 1) + goto error; + break; + case 4: + if (cmd->lsa & 3) + goto error; + break; + case 8: + if (cmd->lsa & 7) + goto error; + break; + case 0: + if (cmd->lsa & 15) + goto error; + break; + error: + default: + pr_debug("invalid DMA alignment %x for size %x\n", + cmd->lsa & 0xf, cmd->size); + return -EIO; + } + + if (cmd->size > 16 * 1024) { + pr_debug("invalid DMA size %x\n", cmd->size); + return -EIO; + } + + if (cmd->tag & 0xfff0) { + /* we reserve the higher tag numbers for kernel use */ + pr_debug("invalid DMA tag\n"); + return -EIO; + } + + if (cmd->class) { + /* not supported in this version */ + pr_debug("invalid DMA class\n"); + return -EIO; + } + + return 0; +} + +static int spu_send_mfc_command(struct spu_context *ctx, + struct mfc_dma_command cmd, + int *error) +{ + *error = ctx->ops->send_mfc_command(ctx, &cmd); + if (*error == -EAGAIN) { + /* wait for any tag group to complete + so we have space for the new command */ + ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1); + /* try again, because the queue might be + empty again */ + *error = ctx->ops->send_mfc_command(ctx, &cmd); + if (*error == -EAGAIN) + return 0; + } + return 1; +} + +static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer, + size_t size, loff_t *pos) +{ + struct spu_context *ctx = file->private_data; + struct mfc_dma_command cmd; + int ret = -EINVAL; + + if (size != sizeof cmd) + goto out; + + ret = -EFAULT; + if (copy_from_user(&cmd, buffer, sizeof cmd)) + goto out; + + ret = spufs_check_valid_dma(&cmd); + if (ret) + goto out; + + ret = spu_acquire(ctx); + if (ret) + goto out; + + ret 
= spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE); + if (ret) + goto out; + + if (file->f_flags & O_NONBLOCK) { + ret = ctx->ops->send_mfc_command(ctx, &cmd); + } else { + int status; + ret = spufs_wait(ctx->mfc_wq, + spu_send_mfc_command(ctx, cmd, &status)); + if (ret) + goto out; + if (status) + ret = status; + } + + if (ret) + goto out_unlock; + + ctx->tagwait |= 1 << cmd.tag; + ret = size; + +out_unlock: + spu_release(ctx); +out: + return ret; +} + +static __poll_t spufs_mfc_poll(struct file *file,poll_table *wait) +{ + struct spu_context *ctx = file->private_data; + u32 free_elements, tagstatus; + __poll_t mask; + + poll_wait(file, &ctx->mfc_wq, wait); + + /* + * For now keep this uninterruptible and also ignore the rule + * that poll should not sleep. Will be fixed later. + */ + mutex_lock(&ctx->state_mutex); + ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2); + free_elements = ctx->ops->get_mfc_free_elements(ctx); + tagstatus = ctx->ops->read_mfc_tagstatus(ctx); + spu_release(ctx); + + mask = 0; + if (free_elements & 0xffff) + mask |= EPOLLOUT | EPOLLWRNORM; + if (tagstatus & ctx->tagwait) + mask |= EPOLLIN | EPOLLRDNORM; + + pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__, + free_elements, tagstatus, ctx->tagwait); + + return mask; +} + +static int spufs_mfc_flush(struct file *file, fl_owner_t id) +{ + struct spu_context *ctx = file->private_data; + int ret; + + ret = spu_acquire(ctx); + if (ret) + goto out; +#if 0 +/* this currently hangs */ + ret = spufs_wait(ctx->mfc_wq, + ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2)); + if (ret) + goto out; + ret = spufs_wait(ctx->mfc_wq, + ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait); + if (ret) + goto out; +#else + ret = 0; +#endif + spu_release(ctx); +out: + return ret; +} + +static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync) +{ + struct inode *inode = file_inode(file); + int err = file_write_and_wait_range(file, start, end); + if (!err) { + inode_lock(inode); + err = spufs_mfc_flush(file, NULL); + inode_unlock(inode); + } + return err; +} + +static const struct file_operations spufs_mfc_fops = { + .open = spufs_mfc_open, + .release = spufs_mfc_release, + .read = spufs_mfc_read, + .write = spufs_mfc_write, + .poll = spufs_mfc_poll, + .flush = spufs_mfc_flush, + .fsync = spufs_mfc_fsync, + .mmap = spufs_mfc_mmap, + .llseek = no_llseek, +}; + +static int spufs_npc_set(void *data, u64 val) +{ + struct spu_context *ctx = data; + int ret; + + ret = spu_acquire(ctx); + if (ret) + return ret; + ctx->ops->npc_write(ctx, val); + spu_release(ctx); + + return 0; +} + +static u64 spufs_npc_get(struct spu_context *ctx) +{ + return ctx->ops->npc_read(ctx); +} +DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, + "0x%llx\n", SPU_ATTR_ACQUIRE); + +static int spufs_decr_set(void *data, u64 val) +{ + struct spu_context *ctx = data; + struct spu_lscsa *lscsa = ctx->csa.lscsa; + int ret; + + ret = spu_acquire_saved(ctx); + if (ret) + return ret; + lscsa->decr.slot[0] = (u32) val; + spu_release_saved(ctx); + + return 0; +} + +static u64 spufs_decr_get(struct spu_context *ctx) +{ + struct spu_lscsa *lscsa = ctx->csa.lscsa; + return lscsa->decr.slot[0]; +} +DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set, + "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED); + +static int spufs_decr_status_set(void *data, u64 val) +{ + struct spu_context *ctx = data; + int ret; + + ret = spu_acquire_saved(ctx); + if (ret) + return ret; + if (val) + ctx->csa.priv2.mfc_control_RW |= 
MFC_CNTL_DECREMENTER_RUNNING; + else + ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING; + spu_release_saved(ctx); + + return 0; +} + +static u64 spufs_decr_status_get(struct spu_context *ctx) +{ + if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) + return SPU_DECR_STATUS_RUNNING; + else + return 0; +} +DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get, + spufs_decr_status_set, "0x%llx\n", + SPU_ATTR_ACQUIRE_SAVED); + +static int spufs_event_mask_set(void *data, u64 val) +{ + struct spu_context *ctx = data; + struct spu_lscsa *lscsa = ctx->csa.lscsa; + int ret; + + ret = spu_acquire_saved(ctx); + if (ret) + return ret; + lscsa->event_mask.slot[0] = (u32) val; + spu_release_saved(ctx); + + return 0; +} + +static u64 spufs_event_mask_get(struct spu_context *ctx) +{ + struct spu_lscsa *lscsa = ctx->csa.lscsa; + return lscsa->event_mask.slot[0]; +} + +DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get, + spufs_event_mask_set, "0x%llx\n", + SPU_ATTR_ACQUIRE_SAVED); + +static u64 spufs_event_status_get(struct spu_context *ctx) +{ + struct spu_state *state = &ctx->csa; + u64 stat; + stat = state->spu_chnlcnt_RW[0]; + if (stat) + return state->spu_chnldata_RW[0]; + return 0; +} +DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get, + NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED) + +static int spufs_srr0_set(void *data, u64 val) +{ + struct spu_context *ctx = data; + struct spu_lscsa *lscsa = ctx->csa.lscsa; + int ret; + + ret = spu_acquire_saved(ctx); + if (ret) + return ret; + lscsa->srr0.slot[0] = (u32) val; + spu_release_saved(ctx); + + return 0; +} + +static u64 spufs_srr0_get(struct spu_context *ctx) +{ + struct spu_lscsa *lscsa = ctx->csa.lscsa; + return lscsa->srr0.slot[0]; +} +DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set, + "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED) + +static u64 spufs_id_get(struct spu_context *ctx) +{ + u64 num; + + if (ctx->state == SPU_STATE_RUNNABLE) + num = ctx->spu->number; + else + num = (unsigned int)-1; + + return num; +} +DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n", + SPU_ATTR_ACQUIRE) + +static u64 spufs_object_id_get(struct spu_context *ctx) +{ + /* FIXME: Should there really be no locking here? 
*/ + return ctx->object_id; +} + +static int spufs_object_id_set(void *data, u64 id) +{ + struct spu_context *ctx = data; + ctx->object_id = id; + + return 0; +} + +DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get, + spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE); + +static u64 spufs_lslr_get(struct spu_context *ctx) +{ + return ctx->csa.priv2.spu_lslr_RW; +} +DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n", + SPU_ATTR_ACQUIRE_SAVED); + +static int spufs_info_open(struct inode *inode, struct file *file) +{ + struct spufs_inode_info *i = SPUFS_I(inode); + struct spu_context *ctx = i->i_ctx; + file->private_data = ctx; + return 0; +} + +static int spufs_caps_show(struct seq_file *s, void *private) +{ + struct spu_context *ctx = s->private; + + if (!(ctx->flags & SPU_CREATE_NOSCHED)) + seq_puts(s, "sched\n"); + if (!(ctx->flags & SPU_CREATE_ISOLATE)) + seq_puts(s, "step\n"); + return 0; +} + +static int spufs_caps_open(struct inode *inode, struct file *file) +{ + return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx); +} + +static const struct file_operations spufs_caps_fops = { + .open = spufs_caps_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static ssize_t spufs_mbox_info_dump(struct spu_context *ctx, + struct coredump_params *cprm) +{ + if (!(ctx->csa.prob.mb_stat_R & 0x0000ff)) + return 0; + return spufs_dump_emit(cprm, &ctx->csa.prob.pu_mb_R, + sizeof(ctx->csa.prob.pu_mb_R)); +} + +static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) +{ + struct spu_context *ctx = file->private_data; + u32 stat, data; + int ret; + + ret = spu_acquire_saved(ctx); + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); + stat = ctx->csa.prob.mb_stat_R; + data = ctx->csa.prob.pu_mb_R; + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + + /* EOF if there's no entry in the mbox */ + if (!(stat & 0x0000ff)) + return 0; + + return simple_read_from_buffer(buf, len, pos, &data, sizeof(data)); +} + +static const struct file_operations spufs_mbox_info_fops = { + .open = spufs_info_open, + .read = spufs_mbox_info_read, + .llseek = generic_file_llseek, +}; + +static ssize_t spufs_ibox_info_dump(struct spu_context *ctx, + struct coredump_params *cprm) +{ + if (!(ctx->csa.prob.mb_stat_R & 0xff0000)) + return 0; + return spufs_dump_emit(cprm, &ctx->csa.priv2.puint_mb_R, + sizeof(ctx->csa.priv2.puint_mb_R)); +} + +static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) +{ + struct spu_context *ctx = file->private_data; + u32 stat, data; + int ret; + + ret = spu_acquire_saved(ctx); + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); + stat = ctx->csa.prob.mb_stat_R; + data = ctx->csa.priv2.puint_mb_R; + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + + /* EOF if there's no entry in the ibox */ + if (!(stat & 0xff0000)) + return 0; + + return simple_read_from_buffer(buf, len, pos, &data, sizeof(data)); +} + +static const struct file_operations spufs_ibox_info_fops = { + .open = spufs_info_open, + .read = spufs_ibox_info_read, + .llseek = generic_file_llseek, +}; + +static size_t spufs_wbox_info_cnt(struct spu_context *ctx) +{ + return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32); +} + +static ssize_t spufs_wbox_info_dump(struct spu_context *ctx, + struct coredump_params *cprm) +{ + return spufs_dump_emit(cprm, &ctx->csa.spu_mailbox_data, + spufs_wbox_info_cnt(ctx)); +} + 
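+/*
+ * Note on the mb_stat_R layout as used here and in hw_ops.c: bits 0-7
+ * hold the number of valid entries in the PU (read) mailbox, bits 8-15
+ * the number of free slots in the four-entry SPU-bound write (wbox)
+ * mailbox, and bits 16-23 the number of valid entries in the
+ * interrupting (ibox) mailbox.  spufs_wbox_info_cnt() above therefore
+ * converts "free slots" into "used slots" before they are dumped.
+ */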
+static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) +{ + struct spu_context *ctx = file->private_data; + u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)]; + int ret, count; + + ret = spu_acquire_saved(ctx); + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); + count = spufs_wbox_info_cnt(ctx); + memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data)); + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + + return simple_read_from_buffer(buf, len, pos, &data, + count * sizeof(u32)); +} + +static const struct file_operations spufs_wbox_info_fops = { + .open = spufs_info_open, + .read = spufs_wbox_info_read, + .llseek = generic_file_llseek, +}; + +static void spufs_get_dma_info(struct spu_context *ctx, + struct spu_dma_info *info) +{ + int i; + + info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW; + info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0]; + info->dma_info_status = ctx->csa.spu_chnldata_RW[24]; + info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25]; + info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27]; + for (i = 0; i < 16; i++) { + struct mfc_cq_sr *qp = &info->dma_info_command_data[i]; + struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i]; + + qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW; + qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW; + qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW; + qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW; + } +} + +static ssize_t spufs_dma_info_dump(struct spu_context *ctx, + struct coredump_params *cprm) +{ + struct spu_dma_info info; + + spufs_get_dma_info(ctx, &info); + return spufs_dump_emit(cprm, &info, sizeof(info)); +} + +static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) +{ + struct spu_context *ctx = file->private_data; + struct spu_dma_info info; + int ret; + + ret = spu_acquire_saved(ctx); + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); + spufs_get_dma_info(ctx, &info); + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + + return simple_read_from_buffer(buf, len, pos, &info, + sizeof(info)); +} + +static const struct file_operations spufs_dma_info_fops = { + .open = spufs_info_open, + .read = spufs_dma_info_read, + .llseek = no_llseek, +}; + +static void spufs_get_proxydma_info(struct spu_context *ctx, + struct spu_proxydma_info *info) +{ + int i; + + info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW; + info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW; + info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R; + + for (i = 0; i < 8; i++) { + struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i]; + struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i]; + + qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; + qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; + qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW; + qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW; + } +} + +static ssize_t spufs_proxydma_info_dump(struct spu_context *ctx, + struct coredump_params *cprm) +{ + struct spu_proxydma_info info; + + spufs_get_proxydma_info(ctx, &info); + return spufs_dump_emit(cprm, &info, sizeof(info)); +} + +static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) +{ + struct spu_context *ctx = file->private_data; + struct spu_proxydma_info info; + int ret; + + if (len < sizeof(info)) + return -EINVAL; + + ret = spu_acquire_saved(ctx); + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); + 
spufs_get_proxydma_info(ctx, &info); + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + + return simple_read_from_buffer(buf, len, pos, &info, + sizeof(info)); +} + +static const struct file_operations spufs_proxydma_info_fops = { + .open = spufs_info_open, + .read = spufs_proxydma_info_read, + .llseek = no_llseek, +}; + +static int spufs_show_tid(struct seq_file *s, void *private) +{ + struct spu_context *ctx = s->private; + + seq_printf(s, "%d\n", ctx->tid); + return 0; +} + +static int spufs_tid_open(struct inode *inode, struct file *file) +{ + return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx); +} + +static const struct file_operations spufs_tid_fops = { + .open = spufs_tid_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const char *ctx_state_names[] = { + "user", "system", "iowait", "loaded" +}; + +static unsigned long long spufs_acct_time(struct spu_context *ctx, + enum spu_utilization_state state) +{ + unsigned long long time = ctx->stats.times[state]; + + /* + * In general, utilization statistics are updated by the controlling + * thread as the spu context moves through various well defined + * state transitions, but if the context is lazily loaded its + * utilization statistics are not updated as the controlling thread + * is not tightly coupled with the execution of the spu context. We + * calculate and apply the time delta from the last recorded state + * of the spu context. + */ + if (ctx->spu && ctx->stats.util_state == state) { + time += ktime_get_ns() - ctx->stats.tstamp; + } + + return time / NSEC_PER_MSEC; +} + +static unsigned long long spufs_slb_flts(struct spu_context *ctx) +{ + unsigned long long slb_flts = ctx->stats.slb_flt; + + if (ctx->state == SPU_STATE_RUNNABLE) { + slb_flts += (ctx->spu->stats.slb_flt - + ctx->stats.slb_flt_base); + } + + return slb_flts; +} + +static unsigned long long spufs_class2_intrs(struct spu_context *ctx) +{ + unsigned long long class2_intrs = ctx->stats.class2_intr; + + if (ctx->state == SPU_STATE_RUNNABLE) { + class2_intrs += (ctx->spu->stats.class2_intr - + ctx->stats.class2_intr_base); + } + + return class2_intrs; +} + + +static int spufs_show_stat(struct seq_file *s, void *private) +{ + struct spu_context *ctx = s->private; + int ret; + + ret = spu_acquire(ctx); + if (ret) + return ret; + + seq_printf(s, "%s %llu %llu %llu %llu " + "%llu %llu %llu %llu %llu %llu %llu %llu\n", + ctx_state_names[ctx->stats.util_state], + spufs_acct_time(ctx, SPU_UTIL_USER), + spufs_acct_time(ctx, SPU_UTIL_SYSTEM), + spufs_acct_time(ctx, SPU_UTIL_IOWAIT), + spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED), + ctx->stats.vol_ctx_switch, + ctx->stats.invol_ctx_switch, + spufs_slb_flts(ctx), + ctx->stats.hash_flt, + ctx->stats.min_flt, + ctx->stats.maj_flt, + spufs_class2_intrs(ctx), + ctx->stats.libassist); + spu_release(ctx); + return 0; +} + +static int spufs_stat_open(struct inode *inode, struct file *file) +{ + return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx); +} + +static const struct file_operations spufs_stat_fops = { + .open = spufs_stat_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static inline int spufs_switch_log_used(struct spu_context *ctx) +{ + return (ctx->switch_log->head - ctx->switch_log->tail) % + SWITCH_LOG_BUFSIZE; +} + +static inline int spufs_switch_log_avail(struct spu_context *ctx) +{ + return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx); +} + +static int spufs_switch_log_open(struct inode *inode, struct 
file *file) +{ + struct spu_context *ctx = SPUFS_I(inode)->i_ctx; + int rc; + + rc = spu_acquire(ctx); + if (rc) + return rc; + + if (ctx->switch_log) { + rc = -EBUSY; + goto out; + } + + ctx->switch_log = kmalloc(struct_size(ctx->switch_log, log, + SWITCH_LOG_BUFSIZE), GFP_KERNEL); + + if (!ctx->switch_log) { + rc = -ENOMEM; + goto out; + } + + ctx->switch_log->head = ctx->switch_log->tail = 0; + init_waitqueue_head(&ctx->switch_log->wait); + rc = 0; + +out: + spu_release(ctx); + return rc; +} + +static int spufs_switch_log_release(struct inode *inode, struct file *file) +{ + struct spu_context *ctx = SPUFS_I(inode)->i_ctx; + int rc; + + rc = spu_acquire(ctx); + if (rc) + return rc; + + kfree(ctx->switch_log); + ctx->switch_log = NULL; + spu_release(ctx); + + return 0; +} + +static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n) +{ + struct switch_log_entry *p; + + p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE; + + return snprintf(tbuf, n, "%llu.%09u %d %u %u %llu\n", + (unsigned long long) p->tstamp.tv_sec, + (unsigned int) p->tstamp.tv_nsec, + p->spu_id, + (unsigned int) p->type, + (unsigned int) p->val, + (unsigned long long) p->timebase); +} + +static ssize_t spufs_switch_log_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos) +{ + struct inode *inode = file_inode(file); + struct spu_context *ctx = SPUFS_I(inode)->i_ctx; + int error = 0, cnt = 0; + + if (!buf) + return -EINVAL; + + error = spu_acquire(ctx); + if (error) + return error; + + while (cnt < len) { + char tbuf[128]; + int width; + + if (spufs_switch_log_used(ctx) == 0) { + if (cnt > 0) { + /* If there's data ready to go, we can + * just return straight away */ + break; + + } else if (file->f_flags & O_NONBLOCK) { + error = -EAGAIN; + break; + + } else { + /* spufs_wait will drop the mutex and + * re-acquire, but since we're in read(), the + * file cannot be _released (and so + * ctx->switch_log is stable). + */ + error = spufs_wait(ctx->switch_log->wait, + spufs_switch_log_used(ctx) > 0); + + /* On error, spufs_wait returns without the + * state mutex held */ + if (error) + return error; + + /* We may have had entries read from underneath + * us while we dropped the mutex in spufs_wait, + * so re-check */ + if (spufs_switch_log_used(ctx) == 0) + continue; + } + } + + width = switch_log_sprint(ctx, tbuf, sizeof(tbuf)); + if (width < len) + ctx->switch_log->tail = + (ctx->switch_log->tail + 1) % + SWITCH_LOG_BUFSIZE; + else + /* If the record is greater than space available return + * partial buffer (so far) */ + break; + + error = copy_to_user(buf + cnt, tbuf, width); + if (error) + break; + cnt += width; + } + + spu_release(ctx); + + return cnt == 0 ? error : cnt; +} + +static __poll_t spufs_switch_log_poll(struct file *file, poll_table *wait) +{ + struct inode *inode = file_inode(file); + struct spu_context *ctx = SPUFS_I(inode)->i_ctx; + __poll_t mask = 0; + int rc; + + poll_wait(file, &ctx->switch_log->wait, wait); + + rc = spu_acquire(ctx); + if (rc) + return rc; + + if (spufs_switch_log_used(ctx) > 0) + mask |= EPOLLIN; + + spu_release(ctx); + + return mask; +} + +static const struct file_operations spufs_switch_log_fops = { + .open = spufs_switch_log_open, + .read = spufs_switch_log_read, + .poll = spufs_switch_log_poll, + .release = spufs_switch_log_release, + .llseek = no_llseek, +}; + +/** + * Log a context switch event to a switch log reader. + * + * Must be called with ctx->state_mutex held. 
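+ *
+ * @spu:  physical SPU the context is loaded on, or NULL if none
+ * @ctx:  context whose switch_log buffer receives the entry
+ * @type: event type recorded in the log entry
+ * @val:  event-specific value recorded alongside the type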
+ */ +void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx, + u32 type, u32 val) +{ + if (!ctx->switch_log) + return; + + if (spufs_switch_log_avail(ctx) > 1) { + struct switch_log_entry *p; + + p = ctx->switch_log->log + ctx->switch_log->head; + ktime_get_ts64(&p->tstamp); + p->timebase = get_tb(); + p->spu_id = spu ? spu->number : -1; + p->type = type; + p->val = val; + + ctx->switch_log->head = + (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE; + } + + wake_up(&ctx->switch_log->wait); +} + +static int spufs_show_ctx(struct seq_file *s, void *private) +{ + struct spu_context *ctx = s->private; + u64 mfc_control_RW; + + mutex_lock(&ctx->state_mutex); + if (ctx->spu) { + struct spu *spu = ctx->spu; + struct spu_priv2 __iomem *priv2 = spu->priv2; + + spin_lock_irq(&spu->register_lock); + mfc_control_RW = in_be64(&priv2->mfc_control_RW); + spin_unlock_irq(&spu->register_lock); + } else { + struct spu_state *csa = &ctx->csa; + + mfc_control_RW = csa->priv2.mfc_control_RW; + } + + seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)" + " %c %llx %llx %llx %llx %x %x\n", + ctx->state == SPU_STATE_SAVED ? 'S' : 'R', + ctx->flags, + ctx->sched_flags, + ctx->prio, + ctx->time_slice, + ctx->spu ? ctx->spu->number : -1, + !list_empty(&ctx->rq) ? 'q' : ' ', + ctx->csa.class_0_pending, + ctx->csa.class_0_dar, + ctx->csa.class_1_dsisr, + mfc_control_RW, + ctx->ops->runcntl_read(ctx), + ctx->ops->status_read(ctx)); + + mutex_unlock(&ctx->state_mutex); + + return 0; +} + +static int spufs_ctx_open(struct inode *inode, struct file *file) +{ + return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx); +} + +static const struct file_operations spufs_ctx_fops = { + .open = spufs_ctx_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +const struct spufs_tree_descr spufs_dir_contents[] = { + { "capabilities", &spufs_caps_fops, 0444, }, + { "mem", &spufs_mem_fops, 0666, LS_SIZE, }, + { "regs", &spufs_regs_fops, 0666, sizeof(struct spu_reg128[128]), }, + { "mbox", &spufs_mbox_fops, 0444, }, + { "ibox", &spufs_ibox_fops, 0444, }, + { "wbox", &spufs_wbox_fops, 0222, }, + { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), }, + { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), }, + { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), }, + { "signal1", &spufs_signal1_fops, 0666, }, + { "signal2", &spufs_signal2_fops, 0666, }, + { "signal1_type", &spufs_signal1_type, 0666, }, + { "signal2_type", &spufs_signal2_type, 0666, }, + { "cntl", &spufs_cntl_fops, 0666, }, + { "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), }, + { "lslr", &spufs_lslr_ops, 0444, }, + { "mfc", &spufs_mfc_fops, 0666, }, + { "mss", &spufs_mss_fops, 0666, }, + { "npc", &spufs_npc_ops, 0666, }, + { "srr0", &spufs_srr0_ops, 0666, }, + { "decr", &spufs_decr_ops, 0666, }, + { "decr_status", &spufs_decr_status_ops, 0666, }, + { "event_mask", &spufs_event_mask_ops, 0666, }, + { "event_status", &spufs_event_status_ops, 0444, }, + { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, }, + { "phys-id", &spufs_id_ops, 0666, }, + { "object-id", &spufs_object_id_ops, 0666, }, + { "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), }, + { "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), }, + { "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), }, + { "dma_info", &spufs_dma_info_fops, 0444, + sizeof(struct spu_dma_info), }, + { "proxydma_info", &spufs_proxydma_info_fops, 0444, + sizeof(struct spu_proxydma_info)}, + { "tid", &spufs_tid_fops, 0444, }, + { 
"stat", &spufs_stat_fops, 0444, }, + { "switch_log", &spufs_switch_log_fops, 0444 }, + {}, +}; + +const struct spufs_tree_descr spufs_dir_nosched_contents[] = { + { "capabilities", &spufs_caps_fops, 0444, }, + { "mem", &spufs_mem_fops, 0666, LS_SIZE, }, + { "mbox", &spufs_mbox_fops, 0444, }, + { "ibox", &spufs_ibox_fops, 0444, }, + { "wbox", &spufs_wbox_fops, 0222, }, + { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), }, + { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), }, + { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), }, + { "signal1", &spufs_signal1_nosched_fops, 0222, }, + { "signal2", &spufs_signal2_nosched_fops, 0222, }, + { "signal1_type", &spufs_signal1_type, 0666, }, + { "signal2_type", &spufs_signal2_type, 0666, }, + { "mss", &spufs_mss_fops, 0666, }, + { "mfc", &spufs_mfc_fops, 0666, }, + { "cntl", &spufs_cntl_fops, 0666, }, + { "npc", &spufs_npc_ops, 0666, }, + { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, }, + { "phys-id", &spufs_id_ops, 0666, }, + { "object-id", &spufs_object_id_ops, 0666, }, + { "tid", &spufs_tid_fops, 0444, }, + { "stat", &spufs_stat_fops, 0444, }, + {}, +}; + +const struct spufs_tree_descr spufs_dir_debug_contents[] = { + { ".ctx", &spufs_ctx_fops, 0444, }, + {}, +}; + +const struct spufs_coredump_reader spufs_coredump_read[] = { + { "regs", spufs_regs_dump, NULL, sizeof(struct spu_reg128[128])}, + { "fpcr", spufs_fpcr_dump, NULL, sizeof(struct spu_reg128) }, + { "lslr", NULL, spufs_lslr_get, 19 }, + { "decr", NULL, spufs_decr_get, 19 }, + { "decr_status", NULL, spufs_decr_status_get, 19 }, + { "mem", spufs_mem_dump, NULL, LS_SIZE, }, + { "signal1", spufs_signal1_dump, NULL, sizeof(u32) }, + { "signal1_type", NULL, spufs_signal1_type_get, 19 }, + { "signal2", spufs_signal2_dump, NULL, sizeof(u32) }, + { "signal2_type", NULL, spufs_signal2_type_get, 19 }, + { "event_mask", NULL, spufs_event_mask_get, 19 }, + { "event_status", NULL, spufs_event_status_get, 19 }, + { "mbox_info", spufs_mbox_info_dump, NULL, sizeof(u32) }, + { "ibox_info", spufs_ibox_info_dump, NULL, sizeof(u32) }, + { "wbox_info", spufs_wbox_info_dump, NULL, 4 * sizeof(u32)}, + { "dma_info", spufs_dma_info_dump, NULL, sizeof(struct spu_dma_info)}, + { "proxydma_info", spufs_proxydma_info_dump, + NULL, sizeof(struct spu_proxydma_info)}, + { "object-id", NULL, spufs_object_id_get, 19 }, + { "npc", NULL, spufs_npc_get, 19 }, + { NULL }, +}; diff --git a/arch/powerpc/platforms/cell/spufs/gang.c b/arch/powerpc/platforms/cell/spufs/gang.c new file mode 100644 index 0000000000..827d338dea --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/gang.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SPU file system + * + * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 + * + * Author: Arnd Bergmann + */ + +#include +#include + +#include "spufs.h" + +struct spu_gang *alloc_spu_gang(void) +{ + struct spu_gang *gang; + + gang = kzalloc(sizeof *gang, GFP_KERNEL); + if (!gang) + goto out; + + kref_init(&gang->kref); + mutex_init(&gang->mutex); + mutex_init(&gang->aff_mutex); + INIT_LIST_HEAD(&gang->list); + INIT_LIST_HEAD(&gang->aff_list_head); + +out: + return gang; +} + +static void destroy_spu_gang(struct kref *kref) +{ + struct spu_gang *gang; + gang = container_of(kref, struct spu_gang, kref); + WARN_ON(gang->contexts || !list_empty(&gang->list)); + kfree(gang); +} + +struct spu_gang *get_spu_gang(struct spu_gang *gang) +{ + kref_get(&gang->kref); + return gang; +} + +int put_spu_gang(struct spu_gang *gang) +{ + return 
kref_put(&gang->kref, &destroy_spu_gang); +} + +void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx) +{ + mutex_lock(&gang->mutex); + ctx->gang = get_spu_gang(gang); + list_add(&ctx->gang_list, &gang->list); + gang->contexts++; + mutex_unlock(&gang->mutex); +} + +void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx) +{ + mutex_lock(&gang->mutex); + WARN_ON(ctx->gang != gang); + if (!list_empty(&ctx->aff_list)) { + list_del_init(&ctx->aff_list); + gang->aff_flags &= ~AFF_OFFSETS_SET; + } + list_del_init(&ctx->gang_list); + gang->contexts--; + mutex_unlock(&gang->mutex); + + put_spu_gang(gang); +} diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c new file mode 100644 index 0000000000..8deaf786ed --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c @@ -0,0 +1,335 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* hw_ops.c - query/set operations on active SPU context. + * + * Copyright (C) IBM 2005 + * Author: Mark Nutter + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include "spufs.h" + +static int spu_hw_mbox_read(struct spu_context *ctx, u32 * data) +{ + struct spu *spu = ctx->spu; + struct spu_problem __iomem *prob = spu->problem; + u32 mbox_stat; + int ret = 0; + + spin_lock_irq(&spu->register_lock); + mbox_stat = in_be32(&prob->mb_stat_R); + if (mbox_stat & 0x0000ff) { + *data = in_be32(&prob->pu_mb_R); + ret = 4; + } + spin_unlock_irq(&spu->register_lock); + return ret; +} + +static u32 spu_hw_mbox_stat_read(struct spu_context *ctx) +{ + return in_be32(&ctx->spu->problem->mb_stat_R); +} + +static __poll_t spu_hw_mbox_stat_poll(struct spu_context *ctx, __poll_t events) +{ + struct spu *spu = ctx->spu; + __poll_t ret = 0; + u32 stat; + + spin_lock_irq(&spu->register_lock); + stat = in_be32(&spu->problem->mb_stat_R); + + /* if the requested event is there, return the poll + mask, otherwise enable the interrupt to get notified, + but first mark any pending interrupts as done so + we don't get woken up unnecessarily */ + + if (events & (EPOLLIN | EPOLLRDNORM)) { + if (stat & 0xff0000) + ret |= EPOLLIN | EPOLLRDNORM; + else { + spu_int_stat_clear(spu, 2, CLASS2_MAILBOX_INTR); + spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR); + } + } + if (events & (EPOLLOUT | EPOLLWRNORM)) { + if (stat & 0x00ff00) + ret = EPOLLOUT | EPOLLWRNORM; + else { + spu_int_stat_clear(spu, 2, + CLASS2_MAILBOX_THRESHOLD_INTR); + spu_int_mask_or(spu, 2, + CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR); + } + } + spin_unlock_irq(&spu->register_lock); + return ret; +} + +static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data) +{ + struct spu *spu = ctx->spu; + struct spu_problem __iomem *prob = spu->problem; + struct spu_priv2 __iomem *priv2 = spu->priv2; + int ret; + + spin_lock_irq(&spu->register_lock); + if (in_be32(&prob->mb_stat_R) & 0xff0000) { + /* read the first available word */ + *data = in_be64(&priv2->puint_mb_R); + ret = 4; + } else { + /* make sure we get woken up by the interrupt */ + spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR); + ret = 0; + } + spin_unlock_irq(&spu->register_lock); + return ret; +} + +static int spu_hw_wbox_write(struct spu_context *ctx, u32 data) +{ + struct spu *spu = ctx->spu; + struct spu_problem __iomem *prob = spu->problem; + int ret; + + spin_lock_irq(&spu->register_lock); + if (in_be32(&prob->mb_stat_R) & 0x00ff00) { + /* we have space to write wbox_data to */ + 
out_be32(&prob->spu_mb_W, data); + ret = 4; + } else { + /* make sure we get woken up by the interrupt when space + becomes available */ + spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR); + ret = 0; + } + spin_unlock_irq(&spu->register_lock); + return ret; +} + +static void spu_hw_signal1_write(struct spu_context *ctx, u32 data) +{ + out_be32(&ctx->spu->problem->signal_notify1, data); +} + +static void spu_hw_signal2_write(struct spu_context *ctx, u32 data) +{ + out_be32(&ctx->spu->problem->signal_notify2, data); +} + +static void spu_hw_signal1_type_set(struct spu_context *ctx, u64 val) +{ + struct spu *spu = ctx->spu; + struct spu_priv2 __iomem *priv2 = spu->priv2; + u64 tmp; + + spin_lock_irq(&spu->register_lock); + tmp = in_be64(&priv2->spu_cfg_RW); + if (val) + tmp |= 1; + else + tmp &= ~1; + out_be64(&priv2->spu_cfg_RW, tmp); + spin_unlock_irq(&spu->register_lock); +} + +static u64 spu_hw_signal1_type_get(struct spu_context *ctx) +{ + return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0); +} + +static void spu_hw_signal2_type_set(struct spu_context *ctx, u64 val) +{ + struct spu *spu = ctx->spu; + struct spu_priv2 __iomem *priv2 = spu->priv2; + u64 tmp; + + spin_lock_irq(&spu->register_lock); + tmp = in_be64(&priv2->spu_cfg_RW); + if (val) + tmp |= 2; + else + tmp &= ~2; + out_be64(&priv2->spu_cfg_RW, tmp); + spin_unlock_irq(&spu->register_lock); +} + +static u64 spu_hw_signal2_type_get(struct spu_context *ctx) +{ + return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0); +} + +static u32 spu_hw_npc_read(struct spu_context *ctx) +{ + return in_be32(&ctx->spu->problem->spu_npc_RW); +} + +static void spu_hw_npc_write(struct spu_context *ctx, u32 val) +{ + out_be32(&ctx->spu->problem->spu_npc_RW, val); +} + +static u32 spu_hw_status_read(struct spu_context *ctx) +{ + return in_be32(&ctx->spu->problem->spu_status_R); +} + +static char *spu_hw_get_ls(struct spu_context *ctx) +{ + return ctx->spu->local_store; +} + +static void spu_hw_privcntl_write(struct spu_context *ctx, u64 val) +{ + out_be64(&ctx->spu->priv2->spu_privcntl_RW, val); +} + +static u32 spu_hw_runcntl_read(struct spu_context *ctx) +{ + return in_be32(&ctx->spu->problem->spu_runcntl_RW); +} + +static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val) +{ + spin_lock_irq(&ctx->spu->register_lock); + if (val & SPU_RUNCNTL_ISOLATE) + spu_hw_privcntl_write(ctx, + SPU_PRIVCNT_LOAD_REQUEST_ENABLE_MASK); + out_be32(&ctx->spu->problem->spu_runcntl_RW, val); + spin_unlock_irq(&ctx->spu->register_lock); +} + +static void spu_hw_runcntl_stop(struct spu_context *ctx) +{ + spin_lock_irq(&ctx->spu->register_lock); + out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP); + while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING) + cpu_relax(); + spin_unlock_irq(&ctx->spu->register_lock); +} + +static void spu_hw_master_start(struct spu_context *ctx) +{ + struct spu *spu = ctx->spu; + u64 sr1; + + spin_lock_irq(&spu->register_lock); + sr1 = spu_mfc_sr1_get(spu) | MFC_STATE1_MASTER_RUN_CONTROL_MASK; + spu_mfc_sr1_set(spu, sr1); + spin_unlock_irq(&spu->register_lock); +} + +static void spu_hw_master_stop(struct spu_context *ctx) +{ + struct spu *spu = ctx->spu; + u64 sr1; + + spin_lock_irq(&spu->register_lock); + sr1 = spu_mfc_sr1_get(spu) & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK; + spu_mfc_sr1_set(spu, sr1); + spin_unlock_irq(&spu->register_lock); +} + +static int spu_hw_set_mfc_query(struct spu_context * ctx, u32 mask, u32 mode) +{ + struct spu_problem __iomem *prob = ctx->spu->problem; + int 
ret; + + spin_lock_irq(&ctx->spu->register_lock); + ret = -EAGAIN; + if (in_be32(&prob->dma_querytype_RW)) + goto out; + ret = 0; + out_be32(&prob->dma_querymask_RW, mask); + out_be32(&prob->dma_querytype_RW, mode); +out: + spin_unlock_irq(&ctx->spu->register_lock); + return ret; +} + +static u32 spu_hw_read_mfc_tagstatus(struct spu_context * ctx) +{ + return in_be32(&ctx->spu->problem->dma_tagstatus_R); +} + +static u32 spu_hw_get_mfc_free_elements(struct spu_context *ctx) +{ + return in_be32(&ctx->spu->problem->dma_qstatus_R); +} + +static int spu_hw_send_mfc_command(struct spu_context *ctx, + struct mfc_dma_command *cmd) +{ + u32 status; + struct spu_problem __iomem *prob = ctx->spu->problem; + + spin_lock_irq(&ctx->spu->register_lock); + out_be32(&prob->mfc_lsa_W, cmd->lsa); + out_be64(&prob->mfc_ea_W, cmd->ea); + out_be32(&prob->mfc_union_W.by32.mfc_size_tag32, + cmd->size << 16 | cmd->tag); + out_be32(&prob->mfc_union_W.by32.mfc_class_cmd32, + cmd->class << 16 | cmd->cmd); + status = in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32); + spin_unlock_irq(&ctx->spu->register_lock); + + switch (status & 0xffff) { + case 0: + return 0; + case 2: + return -EAGAIN; + default: + return -EINVAL; + } +} + +static void spu_hw_restart_dma(struct spu_context *ctx) +{ + struct spu_priv2 __iomem *priv2 = ctx->spu->priv2; + + if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &ctx->spu->flags)) + out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND); +} + +struct spu_context_ops spu_hw_ops = { + .mbox_read = spu_hw_mbox_read, + .mbox_stat_read = spu_hw_mbox_stat_read, + .mbox_stat_poll = spu_hw_mbox_stat_poll, + .ibox_read = spu_hw_ibox_read, + .wbox_write = spu_hw_wbox_write, + .signal1_write = spu_hw_signal1_write, + .signal2_write = spu_hw_signal2_write, + .signal1_type_set = spu_hw_signal1_type_set, + .signal1_type_get = spu_hw_signal1_type_get, + .signal2_type_set = spu_hw_signal2_type_set, + .signal2_type_get = spu_hw_signal2_type_get, + .npc_read = spu_hw_npc_read, + .npc_write = spu_hw_npc_write, + .status_read = spu_hw_status_read, + .get_ls = spu_hw_get_ls, + .privcntl_write = spu_hw_privcntl_write, + .runcntl_read = spu_hw_runcntl_read, + .runcntl_write = spu_hw_runcntl_write, + .runcntl_stop = spu_hw_runcntl_stop, + .master_start = spu_hw_master_start, + .master_stop = spu_hw_master_stop, + .set_mfc_query = spu_hw_set_mfc_query, + .read_mfc_tagstatus = spu_hw_read_mfc_tagstatus, + .get_mfc_free_elements = spu_hw_get_mfc_free_elements, + .send_mfc_command = spu_hw_send_mfc_command, + .restart_dma = spu_hw_restart_dma, +}; diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c new file mode 100644 index 0000000000..38c5be34c8 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -0,0 +1,826 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +/* + * SPU file system + * + * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 + * + * Author: Arnd Bergmann + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "spufs.h" + +struct spufs_sb_info { + bool debug; +}; + +static struct kmem_cache *spufs_inode_cache; +char *isolated_loader; +static int isolated_loader_size; + +static struct spufs_sb_info *spufs_get_sb_info(struct super_block *sb) +{ + return sb->s_fs_info; +} + +static struct inode * +spufs_alloc_inode(struct super_block *sb) +{ + struct spufs_inode_info *ei; + + ei = 
kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL); + if (!ei) + return NULL; + + ei->i_gang = NULL; + ei->i_ctx = NULL; + ei->i_openers = 0; + + return &ei->vfs_inode; +} + +static void spufs_free_inode(struct inode *inode) +{ + kmem_cache_free(spufs_inode_cache, SPUFS_I(inode)); +} + +static void +spufs_init_once(void *p) +{ + struct spufs_inode_info *ei = p; + + inode_init_once(&ei->vfs_inode); +} + +static struct inode * +spufs_new_inode(struct super_block *sb, umode_t mode) +{ + struct inode *inode; + + inode = new_inode(sb); + if (!inode) + goto out; + + inode->i_ino = get_next_ino(); + inode->i_mode = mode; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); + inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode); +out: + return inode; +} + +static int +spufs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, + struct iattr *attr) +{ + struct inode *inode = d_inode(dentry); + + if ((attr->ia_valid & ATTR_SIZE) && + (attr->ia_size != inode->i_size)) + return -EINVAL; + setattr_copy(&nop_mnt_idmap, inode, attr); + mark_inode_dirty(inode); + return 0; +} + + +static int +spufs_new_file(struct super_block *sb, struct dentry *dentry, + const struct file_operations *fops, umode_t mode, + size_t size, struct spu_context *ctx) +{ + static const struct inode_operations spufs_file_iops = { + .setattr = spufs_setattr, + }; + struct inode *inode; + int ret; + + ret = -ENOSPC; + inode = spufs_new_inode(sb, S_IFREG | mode); + if (!inode) + goto out; + + ret = 0; + inode->i_op = &spufs_file_iops; + inode->i_fop = fops; + inode->i_size = size; + inode->i_private = SPUFS_I(inode)->i_ctx = get_spu_context(ctx); + d_add(dentry, inode); +out: + return ret; +} + +static void +spufs_evict_inode(struct inode *inode) +{ + struct spufs_inode_info *ei = SPUFS_I(inode); + clear_inode(inode); + if (ei->i_ctx) + put_spu_context(ei->i_ctx); + if (ei->i_gang) + put_spu_gang(ei->i_gang); +} + +static void spufs_prune_dir(struct dentry *dir) +{ + struct dentry *dentry, *tmp; + + inode_lock(d_inode(dir)); + list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) { + spin_lock(&dentry->d_lock); + if (simple_positive(dentry)) { + dget_dlock(dentry); + __d_drop(dentry); + spin_unlock(&dentry->d_lock); + simple_unlink(d_inode(dir), dentry); + /* XXX: what was dcache_lock protecting here? 
Other + * filesystems (IB, configfs) release dcache_lock + * before unlink */ + dput(dentry); + } else { + spin_unlock(&dentry->d_lock); + } + } + shrink_dcache_parent(dir); + inode_unlock(d_inode(dir)); +} + +/* Caller must hold parent->i_mutex */ +static int spufs_rmdir(struct inode *parent, struct dentry *dir) +{ + /* remove all entries */ + int res; + spufs_prune_dir(dir); + d_drop(dir); + res = simple_rmdir(parent, dir); + /* We have to give up the mm_struct */ + spu_forget(SPUFS_I(d_inode(dir))->i_ctx); + return res; +} + +static int spufs_fill_dir(struct dentry *dir, + const struct spufs_tree_descr *files, umode_t mode, + struct spu_context *ctx) +{ + while (files->name && files->name[0]) { + int ret; + struct dentry *dentry = d_alloc_name(dir, files->name); + if (!dentry) + return -ENOMEM; + ret = spufs_new_file(dir->d_sb, dentry, files->ops, + files->mode & mode, files->size, ctx); + if (ret) + return ret; + files++; + } + return 0; +} + +static int spufs_dir_close(struct inode *inode, struct file *file) +{ + struct inode *parent; + struct dentry *dir; + int ret; + + dir = file->f_path.dentry; + parent = d_inode(dir->d_parent); + + inode_lock_nested(parent, I_MUTEX_PARENT); + ret = spufs_rmdir(parent, dir); + inode_unlock(parent); + WARN_ON(ret); + + return dcache_dir_close(inode, file); +} + +const struct file_operations spufs_context_fops = { + .open = dcache_dir_open, + .release = spufs_dir_close, + .llseek = dcache_dir_lseek, + .read = generic_read_dir, + .iterate_shared = dcache_readdir, + .fsync = noop_fsync, +}; +EXPORT_SYMBOL_GPL(spufs_context_fops); + +static int +spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags, + umode_t mode) +{ + int ret; + struct inode *inode; + struct spu_context *ctx; + + inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR); + if (!inode) + return -ENOSPC; + + inode_init_owner(&nop_mnt_idmap, inode, dir, mode | S_IFDIR); + ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */ + SPUFS_I(inode)->i_ctx = ctx; + if (!ctx) { + iput(inode); + return -ENOSPC; + } + + ctx->flags = flags; + inode->i_op = &simple_dir_inode_operations; + inode->i_fop = &simple_dir_operations; + + inode_lock(inode); + + dget(dentry); + inc_nlink(dir); + inc_nlink(inode); + + d_instantiate(dentry, inode); + + if (flags & SPU_CREATE_NOSCHED) + ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents, + mode, ctx); + else + ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx); + + if (!ret && spufs_get_sb_info(dir->i_sb)->debug) + ret = spufs_fill_dir(dentry, spufs_dir_debug_contents, + mode, ctx); + + if (ret) + spufs_rmdir(dir, dentry); + + inode_unlock(inode); + + return ret; +} + +static int spufs_context_open(const struct path *path) +{ + int ret; + struct file *filp; + + ret = get_unused_fd_flags(0); + if (ret < 0) + return ret; + + filp = dentry_open(path, O_RDONLY, current_cred()); + if (IS_ERR(filp)) { + put_unused_fd(ret); + return PTR_ERR(filp); + } + + filp->f_op = &spufs_context_fops; + fd_install(ret, filp); + return ret; +} + +static struct spu_context * +spufs_assert_affinity(unsigned int flags, struct spu_gang *gang, + struct file *filp) +{ + struct spu_context *tmp, *neighbor, *err; + int count, node; + int aff_supp; + + aff_supp = !list_empty(&(list_entry(cbe_spu_info[0].spus.next, + struct spu, cbe_list))->aff_list); + + if (!aff_supp) + return ERR_PTR(-EINVAL); + + if (flags & SPU_CREATE_GANG) + return ERR_PTR(-EINVAL); + + if (flags & SPU_CREATE_AFFINITY_MEM && + gang->aff_ref_ctx && + gang->aff_ref_ctx->flags & 
SPU_CREATE_AFFINITY_MEM) + return ERR_PTR(-EEXIST); + + if (gang->aff_flags & AFF_MERGED) + return ERR_PTR(-EBUSY); + + neighbor = NULL; + if (flags & SPU_CREATE_AFFINITY_SPU) { + if (!filp || filp->f_op != &spufs_context_fops) + return ERR_PTR(-EINVAL); + + neighbor = get_spu_context( + SPUFS_I(file_inode(filp))->i_ctx); + + if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) && + !list_is_last(&neighbor->aff_list, &gang->aff_list_head) && + !list_entry(neighbor->aff_list.next, struct spu_context, + aff_list)->aff_head) { + err = ERR_PTR(-EEXIST); + goto out_put_neighbor; + } + + if (gang != neighbor->gang) { + err = ERR_PTR(-EINVAL); + goto out_put_neighbor; + } + + count = 1; + list_for_each_entry(tmp, &gang->aff_list_head, aff_list) + count++; + if (list_empty(&neighbor->aff_list)) + count++; + + for (node = 0; node < MAX_NUMNODES; node++) { + if ((cbe_spu_info[node].n_spus - atomic_read( + &cbe_spu_info[node].reserved_spus)) >= count) + break; + } + + if (node == MAX_NUMNODES) { + err = ERR_PTR(-EEXIST); + goto out_put_neighbor; + } + } + + return neighbor; + +out_put_neighbor: + put_spu_context(neighbor); + return err; +} + +static void +spufs_set_affinity(unsigned int flags, struct spu_context *ctx, + struct spu_context *neighbor) +{ + if (flags & SPU_CREATE_AFFINITY_MEM) + ctx->gang->aff_ref_ctx = ctx; + + if (flags & SPU_CREATE_AFFINITY_SPU) { + if (list_empty(&neighbor->aff_list)) { + list_add_tail(&neighbor->aff_list, + &ctx->gang->aff_list_head); + neighbor->aff_head = 1; + } + + if (list_is_last(&neighbor->aff_list, &ctx->gang->aff_list_head) + || list_entry(neighbor->aff_list.next, struct spu_context, + aff_list)->aff_head) { + list_add(&ctx->aff_list, &neighbor->aff_list); + } else { + list_add_tail(&ctx->aff_list, &neighbor->aff_list); + if (neighbor->aff_head) { + neighbor->aff_head = 0; + ctx->aff_head = 1; + } + } + + if (!ctx->gang->aff_ref_ctx) + ctx->gang->aff_ref_ctx = ctx; + } +} + +static int +spufs_create_context(struct inode *inode, struct dentry *dentry, + struct vfsmount *mnt, int flags, umode_t mode, + struct file *aff_filp) +{ + int ret; + int affinity; + struct spu_gang *gang; + struct spu_context *neighbor; + struct path path = {.mnt = mnt, .dentry = dentry}; + + if ((flags & SPU_CREATE_NOSCHED) && + !capable(CAP_SYS_NICE)) + return -EPERM; + + if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE)) + == SPU_CREATE_ISOLATE) + return -EINVAL; + + if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader) + return -ENODEV; + + gang = NULL; + neighbor = NULL; + affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU); + if (affinity) { + gang = SPUFS_I(inode)->i_gang; + if (!gang) + return -EINVAL; + mutex_lock(&gang->aff_mutex); + neighbor = spufs_assert_affinity(flags, gang, aff_filp); + if (IS_ERR(neighbor)) { + ret = PTR_ERR(neighbor); + goto out_aff_unlock; + } + } + + ret = spufs_mkdir(inode, dentry, flags, mode & 0777); + if (ret) + goto out_aff_unlock; + + if (affinity) { + spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx, + neighbor); + if (neighbor) + put_spu_context(neighbor); + } + + ret = spufs_context_open(&path); + if (ret < 0) + WARN_ON(spufs_rmdir(inode, dentry)); + +out_aff_unlock: + if (affinity) + mutex_unlock(&gang->aff_mutex); + return ret; +} + +static int +spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode) +{ + int ret; + struct inode *inode; + struct spu_gang *gang; + + ret = -ENOSPC; + inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR); + if (!inode) + goto out; + + ret = 0; + 
inode_init_owner(&nop_mnt_idmap, inode, dir, mode | S_IFDIR); + gang = alloc_spu_gang(); + SPUFS_I(inode)->i_ctx = NULL; + SPUFS_I(inode)->i_gang = gang; + if (!gang) { + ret = -ENOMEM; + goto out_iput; + } + + inode->i_op = &simple_dir_inode_operations; + inode->i_fop = &simple_dir_operations; + + d_instantiate(dentry, inode); + inc_nlink(dir); + inc_nlink(d_inode(dentry)); + return ret; + +out_iput: + iput(inode); +out: + return ret; +} + +static int spufs_gang_open(const struct path *path) +{ + int ret; + struct file *filp; + + ret = get_unused_fd_flags(0); + if (ret < 0) + return ret; + + /* + * get references for dget and mntget, will be released + * in error path of *_open(). + */ + filp = dentry_open(path, O_RDONLY, current_cred()); + if (IS_ERR(filp)) { + put_unused_fd(ret); + return PTR_ERR(filp); + } + + filp->f_op = &simple_dir_operations; + fd_install(ret, filp); + return ret; +} + +static int spufs_create_gang(struct inode *inode, + struct dentry *dentry, + struct vfsmount *mnt, umode_t mode) +{ + struct path path = {.mnt = mnt, .dentry = dentry}; + int ret; + + ret = spufs_mkgang(inode, dentry, mode & 0777); + if (!ret) { + ret = spufs_gang_open(&path); + if (ret < 0) { + int err = simple_rmdir(inode, dentry); + WARN_ON(err); + } + } + return ret; +} + + +static struct file_system_type spufs_type; + +long spufs_create(const struct path *path, struct dentry *dentry, + unsigned int flags, umode_t mode, struct file *filp) +{ + struct inode *dir = d_inode(path->dentry); + int ret; + + /* check if we are on spufs */ + if (path->dentry->d_sb->s_type != &spufs_type) + return -EINVAL; + + /* don't accept undefined flags */ + if (flags & (~SPU_CREATE_FLAG_ALL)) + return -EINVAL; + + /* only threads can be underneath a gang */ + if (path->dentry != path->dentry->d_sb->s_root) + if ((flags & SPU_CREATE_GANG) || !SPUFS_I(dir)->i_gang) + return -EINVAL; + + mode &= ~current_umask(); + + if (flags & SPU_CREATE_GANG) + ret = spufs_create_gang(dir, dentry, path->mnt, mode); + else + ret = spufs_create_context(dir, dentry, path->mnt, flags, mode, + filp); + if (ret >= 0) + fsnotify_mkdir(dir, dentry); + + return ret; +} + +/* File system initialization */ +struct spufs_fs_context { + kuid_t uid; + kgid_t gid; + umode_t mode; +}; + +enum { + Opt_uid, Opt_gid, Opt_mode, Opt_debug, +}; + +static const struct fs_parameter_spec spufs_fs_parameters[] = { + fsparam_u32 ("gid", Opt_gid), + fsparam_u32oct ("mode", Opt_mode), + fsparam_u32 ("uid", Opt_uid), + fsparam_flag ("debug", Opt_debug), + {} +}; + +static int spufs_show_options(struct seq_file *m, struct dentry *root) +{ + struct spufs_sb_info *sbi = spufs_get_sb_info(root->d_sb); + struct inode *inode = root->d_inode; + + if (!uid_eq(inode->i_uid, GLOBAL_ROOT_UID)) + seq_printf(m, ",uid=%u", + from_kuid_munged(&init_user_ns, inode->i_uid)); + if (!gid_eq(inode->i_gid, GLOBAL_ROOT_GID)) + seq_printf(m, ",gid=%u", + from_kgid_munged(&init_user_ns, inode->i_gid)); + if ((inode->i_mode & S_IALLUGO) != 0775) + seq_printf(m, ",mode=%o", inode->i_mode); + if (sbi->debug) + seq_puts(m, ",debug"); + return 0; +} + +static int spufs_parse_param(struct fs_context *fc, struct fs_parameter *param) +{ + struct spufs_fs_context *ctx = fc->fs_private; + struct spufs_sb_info *sbi = fc->s_fs_info; + struct fs_parse_result result; + kuid_t uid; + kgid_t gid; + int opt; + + opt = fs_parse(fc, spufs_fs_parameters, param, &result); + if (opt < 0) + return opt; + + switch (opt) { + case Opt_uid: + uid = make_kuid(current_user_ns(), result.uint_32); + if 
(!uid_valid(uid)) + return invalf(fc, "Unknown uid"); + ctx->uid = uid; + break; + case Opt_gid: + gid = make_kgid(current_user_ns(), result.uint_32); + if (!gid_valid(gid)) + return invalf(fc, "Unknown gid"); + ctx->gid = gid; + break; + case Opt_mode: + ctx->mode = result.uint_32 & S_IALLUGO; + break; + case Opt_debug: + sbi->debug = true; + break; + } + + return 0; +} + +static void spufs_exit_isolated_loader(void) +{ + free_pages((unsigned long) isolated_loader, + get_order(isolated_loader_size)); +} + +static void __init +spufs_init_isolated_loader(void) +{ + struct device_node *dn; + const char *loader; + int size; + + dn = of_find_node_by_path("/spu-isolation"); + if (!dn) + return; + + loader = of_get_property(dn, "loader", &size); + of_node_put(dn); + if (!loader) + return; + + /* the loader must be align on a 16 byte boundary */ + isolated_loader = (char *)__get_free_pages(GFP_KERNEL, get_order(size)); + if (!isolated_loader) + return; + + isolated_loader_size = size; + memcpy(isolated_loader, loader, size); + printk(KERN_INFO "spufs: SPU isolation mode enabled\n"); +} + +static int spufs_create_root(struct super_block *sb, struct fs_context *fc) +{ + struct spufs_fs_context *ctx = fc->fs_private; + struct inode *inode; + + if (!spu_management_ops) + return -ENODEV; + + inode = spufs_new_inode(sb, S_IFDIR | ctx->mode); + if (!inode) + return -ENOMEM; + + inode->i_uid = ctx->uid; + inode->i_gid = ctx->gid; + inode->i_op = &simple_dir_inode_operations; + inode->i_fop = &simple_dir_operations; + SPUFS_I(inode)->i_ctx = NULL; + inc_nlink(inode); + + sb->s_root = d_make_root(inode); + if (!sb->s_root) + return -ENOMEM; + return 0; +} + +static const struct super_operations spufs_ops = { + .alloc_inode = spufs_alloc_inode, + .free_inode = spufs_free_inode, + .statfs = simple_statfs, + .evict_inode = spufs_evict_inode, + .show_options = spufs_show_options, +}; + +static int spufs_fill_super(struct super_block *sb, struct fs_context *fc) +{ + sb->s_maxbytes = MAX_LFS_FILESIZE; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; + sb->s_magic = SPUFS_MAGIC; + sb->s_op = &spufs_ops; + + return spufs_create_root(sb, fc); +} + +static int spufs_get_tree(struct fs_context *fc) +{ + return get_tree_single(fc, spufs_fill_super); +} + +static void spufs_free_fc(struct fs_context *fc) +{ + kfree(fc->s_fs_info); +} + +static const struct fs_context_operations spufs_context_ops = { + .free = spufs_free_fc, + .parse_param = spufs_parse_param, + .get_tree = spufs_get_tree, +}; + +static int spufs_init_fs_context(struct fs_context *fc) +{ + struct spufs_fs_context *ctx; + struct spufs_sb_info *sbi; + + ctx = kzalloc(sizeof(struct spufs_fs_context), GFP_KERNEL); + if (!ctx) + goto nomem; + + sbi = kzalloc(sizeof(struct spufs_sb_info), GFP_KERNEL); + if (!sbi) + goto nomem_ctx; + + ctx->uid = current_uid(); + ctx->gid = current_gid(); + ctx->mode = 0755; + + fc->fs_private = ctx; + fc->s_fs_info = sbi; + fc->ops = &spufs_context_ops; + return 0; + +nomem_ctx: + kfree(ctx); +nomem: + return -ENOMEM; +} + +static struct file_system_type spufs_type = { + .owner = THIS_MODULE, + .name = "spufs", + .init_fs_context = spufs_init_fs_context, + .parameters = spufs_fs_parameters, + .kill_sb = kill_litter_super, +}; +MODULE_ALIAS_FS("spufs"); + +static int __init spufs_init(void) +{ + int ret; + + ret = -ENODEV; + if (!spu_management_ops) + goto out; + + ret = -ENOMEM; + spufs_inode_cache = kmem_cache_create("spufs_inode_cache", + sizeof(struct spufs_inode_info), 0, + 
SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, spufs_init_once); + + if (!spufs_inode_cache) + goto out; + ret = spu_sched_init(); + if (ret) + goto out_cache; + ret = register_spu_syscalls(&spufs_calls); + if (ret) + goto out_sched; + ret = register_filesystem(&spufs_type); + if (ret) + goto out_syscalls; + + spufs_init_isolated_loader(); + + return 0; + +out_syscalls: + unregister_spu_syscalls(&spufs_calls); +out_sched: + spu_sched_exit(); +out_cache: + kmem_cache_destroy(spufs_inode_cache); +out: + return ret; +} +module_init(spufs_init); + +static void __exit spufs_exit(void) +{ + spu_sched_exit(); + spufs_exit_isolated_loader(); + unregister_spu_syscalls(&spufs_calls); + unregister_filesystem(&spufs_type); + kmem_cache_destroy(spufs_inode_cache); +} +module_exit(spufs_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Arnd Bergmann "); + diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c new file mode 100644 index 0000000000..43b9dde7fd --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SPU local store allocation routines + * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + */ + +#undef DEBUG + +#include +#include +#include +#include + +#include +#include +#include + +#include "spufs.h" + +int spu_alloc_lscsa(struct spu_state *csa) +{ + struct spu_lscsa *lscsa; + unsigned char *p; + + lscsa = vzalloc(sizeof(*lscsa)); + if (!lscsa) + return -ENOMEM; + csa->lscsa = lscsa; + + /* Set LS pages reserved to allow for user-space mapping. */ + for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE) + SetPageReserved(vmalloc_to_page(p)); + + return 0; +} + +void spu_free_lscsa(struct spu_state *csa) +{ + /* Clear reserved bit before vfree. */ + unsigned char *p; + + if (csa->lscsa == NULL) + return; + + for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE) + ClearPageReserved(vmalloc_to_page(p)); + + vfree(csa->lscsa); +} diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c new file mode 100644 index 0000000000..ce52b87496 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/run.c @@ -0,0 +1,451 @@ +// SPDX-License-Identifier: GPL-2.0 +#define DEBUG + +#include +#include + +#include +#include +#include +#include + +#include "spufs.h" + +/* interrupt-level stop callback function. */ +void spufs_stop_callback(struct spu *spu, int irq) +{ + struct spu_context *ctx = spu->ctx; + + /* + * It should be impossible to preempt a context while an exception + * is being processed, since the context switch code is specially + * coded to deal with interrupts ... But, just in case, sanity check + * the context pointer. It is OK to return doing nothing since + * the exception will be regenerated when the context is resumed. 
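+	 * (irq selects which exception class the data copied below comes
+	 * from: 0, 1 or 2.)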
+ */ + if (ctx) { + /* Copy exception arguments into module specific structure */ + switch(irq) { + case 0 : + ctx->csa.class_0_pending = spu->class_0_pending; + ctx->csa.class_0_dar = spu->class_0_dar; + break; + case 1 : + ctx->csa.class_1_dsisr = spu->class_1_dsisr; + ctx->csa.class_1_dar = spu->class_1_dar; + break; + case 2 : + break; + } + + /* ensure that the exception status has hit memory before a + * thread waiting on the context's stop queue is woken */ + smp_wmb(); + + wake_up_all(&ctx->stop_wq); + } +} + +int spu_stopped(struct spu_context *ctx, u32 *stat) +{ + u64 dsisr; + u32 stopped; + + stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP | + SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP; + +top: + *stat = ctx->ops->status_read(ctx); + if (*stat & stopped) { + /* + * If the spu hasn't finished stopping, we need to + * re-read the register to get the stopped value. + */ + if (*stat & SPU_STATUS_RUNNING) + goto top; + return 1; + } + + if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags)) + return 1; + + dsisr = ctx->csa.class_1_dsisr; + if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) + return 1; + + if (ctx->csa.class_0_pending) + return 1; + + return 0; +} + +static int spu_setup_isolated(struct spu_context *ctx) +{ + int ret; + u64 __iomem *mfc_cntl; + u64 sr1; + u32 status; + unsigned long timeout; + const u32 status_loading = SPU_STATUS_RUNNING + | SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS; + + ret = -ENODEV; + if (!isolated_loader) + goto out; + + /* + * We need to exclude userspace access to the context. + * + * To protect against memory access we invalidate all ptes + * and make sure the pagefault handlers block on the mutex. + */ + spu_unmap_mappings(ctx); + + mfc_cntl = &ctx->spu->priv2->mfc_control_RW; + + /* purge the MFC DMA queue to ensure no spurious accesses before we + * enter kernel mode */ + timeout = jiffies + HZ; + out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST); + while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK) + != MFC_CNTL_PURGE_DMA_COMPLETE) { + if (time_after(jiffies, timeout)) { + printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n", + __func__); + ret = -EIO; + goto out; + } + cond_resched(); + } + + /* clear purge status */ + out_be64(mfc_cntl, 0); + + /* put the SPE in kernel mode to allow access to the loader */ + sr1 = spu_mfc_sr1_get(ctx->spu); + sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK; + spu_mfc_sr1_set(ctx->spu, sr1); + + /* start the loader */ + ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32); + ctx->ops->signal2_write(ctx, + (unsigned long)isolated_loader & 0xffffffff); + + ctx->ops->runcntl_write(ctx, + SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE); + + ret = 0; + timeout = jiffies + HZ; + while (((status = ctx->ops->status_read(ctx)) & status_loading) == + status_loading) { + if (time_after(jiffies, timeout)) { + printk(KERN_ERR "%s: timeout waiting for loader\n", + __func__); + ret = -EIO; + goto out_drop_priv; + } + cond_resched(); + } + + if (!(status & SPU_STATUS_RUNNING)) { + /* If isolated LOAD has failed: run SPU, we will get a stop-and + * signal later. 
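+		 * We return -EACCES below and let the resulting stop be
+		 * handled like any other stop-and-signal.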
*/ + pr_debug("%s: isolated LOAD failed\n", __func__); + ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE); + ret = -EACCES; + goto out_drop_priv; + } + + if (!(status & SPU_STATUS_ISOLATED_STATE)) { + /* This isn't allowed by the CBEA, but check anyway */ + pr_debug("%s: SPU fell out of isolated mode?\n", __func__); + ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP); + ret = -EINVAL; + goto out_drop_priv; + } + +out_drop_priv: + /* Finished accessing the loader. Drop kernel mode */ + sr1 |= MFC_STATE1_PROBLEM_STATE_MASK; + spu_mfc_sr1_set(ctx->spu, sr1); + +out: + return ret; +} + +static int spu_run_init(struct spu_context *ctx, u32 *npc) +{ + unsigned long runcntl = SPU_RUNCNTL_RUNNABLE; + int ret; + + spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); + + /* + * NOSCHED is synchronous scheduling with respect to the caller. + * The caller waits for the context to be loaded. + */ + if (ctx->flags & SPU_CREATE_NOSCHED) { + if (ctx->state == SPU_STATE_SAVED) { + ret = spu_activate(ctx, 0); + if (ret) + return ret; + } + } + + /* + * Apply special setup as required. + */ + if (ctx->flags & SPU_CREATE_ISOLATE) { + if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) { + ret = spu_setup_isolated(ctx); + if (ret) + return ret; + } + + /* + * If userspace has set the runcntrl register (eg, to + * issue an isolated exit), we need to re-set it here + */ + runcntl = ctx->ops->runcntl_read(ctx) & + (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE); + if (runcntl == 0) + runcntl = SPU_RUNCNTL_RUNNABLE; + } else { + unsigned long privcntl; + + if (test_thread_flag(TIF_SINGLESTEP)) + privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP; + else + privcntl = SPU_PRIVCNTL_MODE_NORMAL; + + ctx->ops->privcntl_write(ctx, privcntl); + ctx->ops->npc_write(ctx, *npc); + } + + ctx->ops->runcntl_write(ctx, runcntl); + + if (ctx->flags & SPU_CREATE_NOSCHED) { + spuctx_switch_state(ctx, SPU_UTIL_USER); + } else { + + if (ctx->state == SPU_STATE_SAVED) { + ret = spu_activate(ctx, 0); + if (ret) + return ret; + } else { + spuctx_switch_state(ctx, SPU_UTIL_USER); + } + } + + set_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags); + return 0; +} + +static int spu_run_fini(struct spu_context *ctx, u32 *npc, + u32 *status) +{ + int ret = 0; + + spu_del_from_rq(ctx); + + *status = ctx->ops->status_read(ctx); + *npc = ctx->ops->npc_read(ctx); + + spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED); + clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags); + spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status); + spu_release(ctx); + + if (signal_pending(current)) + ret = -ERESTARTSYS; + + return ret; +} + +/* + * SPU syscall restarting is tricky because we violate the basic + * assumption that the signal handler is running on the interrupted + * thread. Here instead, the handler runs on PowerPC user space code, + * while the syscall was called from the SPU. + * This means we can only do a very rough approximation of POSIX + * signal semantics. + */ +static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret, + unsigned int *npc) +{ + int ret; + + switch (*spu_ret) { + case -ERESTARTSYS: + case -ERESTARTNOINTR: + /* + * Enter the regular syscall restarting for + * sys_spu_run, then restart the SPU syscall + * callback. + */ + *npc -= 8; + ret = -ERESTARTSYS; + break; + case -ERESTARTNOHAND: + case -ERESTART_RESTARTBLOCK: + /* + * Restart block is too hard for now, just return -EINTR + * to the SPU. + * ERESTARTNOHAND comes from sys_pause, we also return + * -EINTR from there. + * Assume that we need to be restarted ourselves though. 
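+		 * (So the SPU-side caller sees -EINTR, while sys_spu_run
+		 * itself still gets restarted.)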
+ */ + *spu_ret = -EINTR; + ret = -ERESTARTSYS; + break; + default: + printk(KERN_WARNING "%s: unexpected return code %ld\n", + __func__, *spu_ret); + ret = 0; + } + return ret; +} + +static int spu_process_callback(struct spu_context *ctx) +{ + struct spu_syscall_block s; + u32 ls_pointer, npc; + void __iomem *ls; + long spu_ret; + int ret; + + /* get syscall block from local store */ + npc = ctx->ops->npc_read(ctx) & ~3; + ls = (void __iomem *)ctx->ops->get_ls(ctx); + ls_pointer = in_be32(ls + npc); + if (ls_pointer > (LS_SIZE - sizeof(s))) + return -EFAULT; + memcpy_fromio(&s, ls + ls_pointer, sizeof(s)); + + /* do actual syscall without pinning the spu */ + ret = 0; + spu_ret = -ENOSYS; + npc += 4; + + if (s.nr_ret < NR_syscalls) { + spu_release(ctx); + /* do actual system call from here */ + spu_ret = spu_sys_callback(&s); + if (spu_ret <= -ERESTARTSYS) { + ret = spu_handle_restartsys(ctx, &spu_ret, &npc); + } + mutex_lock(&ctx->state_mutex); + if (ret == -ERESTARTSYS) + return ret; + } + + /* need to re-get the ls, as it may have changed when we released the + * spu */ + ls = (void __iomem *)ctx->ops->get_ls(ctx); + + /* write result, jump over indirect pointer */ + memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret)); + ctx->ops->npc_write(ctx, npc); + ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE); + return ret; +} + +long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event) +{ + int ret; + u32 status; + + if (mutex_lock_interruptible(&ctx->run_mutex)) + return -ERESTARTSYS; + + ctx->event_return = 0; + + ret = spu_acquire(ctx); + if (ret) + goto out_unlock; + + spu_enable_spu(ctx); + + spu_update_sched_info(ctx); + + ret = spu_run_init(ctx, npc); + if (ret) { + spu_release(ctx); + goto out; + } + + do { + ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status)); + if (unlikely(ret)) { + /* + * This is nasty: we need the state_mutex for all the + * bookkeeping even if the syscall was interrupted by + * a signal. ewww. + */ + mutex_lock(&ctx->state_mutex); + break; + } + if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE, + &ctx->sched_flags))) { + if (!(status & SPU_STATUS_STOPPED_BY_STOP)) + continue; + } + + spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); + + if ((status & SPU_STATUS_STOPPED_BY_STOP) && + (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) { + ret = spu_process_callback(ctx); + if (ret) + break; + status &= ~SPU_STATUS_STOPPED_BY_STOP; + } + ret = spufs_handle_class1(ctx); + if (ret) + break; + + ret = spufs_handle_class0(ctx); + if (ret) + break; + + if (signal_pending(current)) + ret = -ERESTARTSYS; + } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP | + SPU_STATUS_STOPPED_BY_HALT | + SPU_STATUS_SINGLE_STEP))); + + spu_disable_spu(ctx); + ret = spu_run_fini(ctx, npc, &status); + spu_yield(ctx); + + if ((status & SPU_STATUS_STOPPED_BY_STOP) && + (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100)) + ctx->stats.libassist++; + + if ((ret == 0) || + ((ret == -ERESTARTSYS) && + ((status & SPU_STATUS_STOPPED_BY_HALT) || + (status & SPU_STATUS_SINGLE_STEP) || + ((status & SPU_STATUS_STOPPED_BY_STOP) && + (status >> SPU_STOP_STATUS_SHIFT != 0x2104))))) + ret = status; + + /* Note: we don't need to force_sig SIGTRAP on single-step + * since we have TIF_SINGLESTEP set, thus the kernel will do + * it upon return from the syscall anyway. 
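+	 * (The generic single-step handling delivers that SIGTRAP when
+	 * sys_spu_run returns to user space.)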
+ */ + if (unlikely(status & SPU_STATUS_SINGLE_STEP)) + ret = -ERESTARTSYS; + + else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP) + && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) { + force_sig(SIGTRAP); + ret = -ERESTARTSYS; + } + +out: + *event = ctx->event_return; +out_unlock: + mutex_unlock(&ctx->run_mutex); + return ret; +} diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c new file mode 100644 index 0000000000..99bd027a7f --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -0,0 +1,1141 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* sched.c - SPU scheduler. + * + * Copyright (C) IBM 2005 + * Author: Mark Nutter + * + * 2006-03-31 NUMA domains added. + */ + +#undef DEBUG + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include "spufs.h" +#define CREATE_TRACE_POINTS +#include "sputrace.h" + +struct spu_prio_array { + DECLARE_BITMAP(bitmap, MAX_PRIO); + struct list_head runq[MAX_PRIO]; + spinlock_t runq_lock; + int nr_waiting; +}; + +static unsigned long spu_avenrun[3]; +static struct spu_prio_array *spu_prio; +static struct task_struct *spusched_task; +static struct timer_list spusched_timer; +static struct timer_list spuloadavg_timer; + +/* + * Priority of a normal, non-rt, non-niced'd process (aka nice level 0). + */ +#define NORMAL_PRIO 120 + +/* + * Frequency of the spu scheduler tick. By default we do one SPU scheduler + * tick for every 10 CPU scheduler ticks. + */ +#define SPUSCHED_TICK (10) + +/* + * These are the 'tuning knobs' of the scheduler: + * + * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is + * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs. + */ +#define MIN_SPU_TIMESLICE max(5 * HZ / (1000 * SPUSCHED_TICK), 1) +#define DEF_SPU_TIMESLICE (100 * HZ / (1000 * SPUSCHED_TICK)) + +#define SCALE_PRIO(x, prio) \ + max(x * (MAX_PRIO - prio) / (NICE_WIDTH / 2), MIN_SPU_TIMESLICE) + +/* + * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values: + * [800ms ... 100ms ... 5ms] + * + * The higher a thread's priority, the bigger timeslices + * it gets during one round of execution. But even the lowest + * priority thread gets MIN_TIMESLICE worth of execution time. + */ +void spu_set_timeslice(struct spu_context *ctx) +{ + if (ctx->prio < NORMAL_PRIO) + ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio); + else + ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio); +} + +/* + * Update scheduling information from the owning thread. + */ +void __spu_update_sched_info(struct spu_context *ctx) +{ + /* + * assert that the context is not on the runqueue, so it is safe + * to change its scheduling parameters. + */ + BUG_ON(!list_empty(&ctx->rq)); + + /* + * 32-Bit assignments are atomic on powerpc, and we don't care about + * memory ordering here because retrieving the controlling thread is + * per definition racy. + */ + ctx->tid = current->pid; + + /* + * We do our own priority calculations, so we normally want + * ->static_prio to start with. Unfortunately this field + * contains junk for threads with a realtime scheduling + * policy so we have to look at ->prio in this case. 
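+	 * (For a SCHED_FIFO or SCHED_RR thread ->prio reflects its realtime
+	 * priority, while a SCHED_OTHER thread's nice level is encoded in
+	 * ->static_prio.)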
+ */ + if (rt_prio(current->prio)) + ctx->prio = current->prio; + else + ctx->prio = current->static_prio; + ctx->policy = current->policy; + + /* + * TO DO: the context may be loaded, so we may need to activate + * it again on a different node. But it shouldn't hurt anything + * to update its parameters, because we know that the scheduler + * is not actively looking at this field, since it is not on the + * runqueue. The context will be rescheduled on the proper node + * if it is timesliced or preempted. + */ + cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr); + + /* Save the current cpu id for spu interrupt routing. */ + ctx->last_ran = raw_smp_processor_id(); +} + +void spu_update_sched_info(struct spu_context *ctx) +{ + int node; + + if (ctx->state == SPU_STATE_RUNNABLE) { + node = ctx->spu->node; + + /* + * Take list_mutex to sync with find_victim(). + */ + mutex_lock(&cbe_spu_info[node].list_mutex); + __spu_update_sched_info(ctx); + mutex_unlock(&cbe_spu_info[node].list_mutex); + } else { + __spu_update_sched_info(ctx); + } +} + +static int __node_allowed(struct spu_context *ctx, int node) +{ + if (nr_cpus_node(node)) { + const struct cpumask *mask = cpumask_of_node(node); + + if (cpumask_intersects(mask, &ctx->cpus_allowed)) + return 1; + } + + return 0; +} + +static int node_allowed(struct spu_context *ctx, int node) +{ + int rval; + + spin_lock(&spu_prio->runq_lock); + rval = __node_allowed(ctx, node); + spin_unlock(&spu_prio->runq_lock); + + return rval; +} + +void do_notify_spus_active(void) +{ + int node; + + /* + * Wake up the active spu_contexts. + */ + for_each_online_node(node) { + struct spu *spu; + + mutex_lock(&cbe_spu_info[node].list_mutex); + list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { + if (spu->alloc_state != SPU_FREE) { + struct spu_context *ctx = spu->ctx; + set_bit(SPU_SCHED_NOTIFY_ACTIVE, + &ctx->sched_flags); + mb(); + wake_up_all(&ctx->stop_wq); + } + } + mutex_unlock(&cbe_spu_info[node].list_mutex); + } +} + +/** + * spu_bind_context - bind spu context to physical spu + * @spu: physical spu to bind to + * @ctx: context to bind + */ +static void spu_bind_context(struct spu *spu, struct spu_context *ctx) +{ + spu_context_trace(spu_bind_context__enter, ctx, spu); + + spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); + + if (ctx->flags & SPU_CREATE_NOSCHED) + atomic_inc(&cbe_spu_info[spu->node].reserved_spus); + + ctx->stats.slb_flt_base = spu->stats.slb_flt; + ctx->stats.class2_intr_base = spu->stats.class2_intr; + + spu_associate_mm(spu, ctx->owner); + + spin_lock_irq(&spu->register_lock); + spu->ctx = ctx; + spu->flags = 0; + ctx->spu = spu; + ctx->ops = &spu_hw_ops; + spu->pid = current->pid; + spu->tgid = current->tgid; + spu->ibox_callback = spufs_ibox_callback; + spu->wbox_callback = spufs_wbox_callback; + spu->stop_callback = spufs_stop_callback; + spu->mfc_callback = spufs_mfc_callback; + spin_unlock_irq(&spu->register_lock); + + spu_unmap_mappings(ctx); + + spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0); + spu_restore(&ctx->csa, spu); + spu->timestamp = jiffies; + ctx->state = SPU_STATE_RUNNABLE; + + spuctx_switch_state(ctx, SPU_UTIL_USER); +} + +/* + * Must be used with the list_mutex held. 
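+ * Returns true if the spu is free or runs a context that may be
+ * time-sliced, i.e. one not created with SPU_CREATE_NOSCHED.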
+ */ +static inline int sched_spu(struct spu *spu) +{ + BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex)); + + return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED)); +} + +static void aff_merge_remaining_ctxs(struct spu_gang *gang) +{ + struct spu_context *ctx; + + list_for_each_entry(ctx, &gang->aff_list_head, aff_list) { + if (list_empty(&ctx->aff_list)) + list_add(&ctx->aff_list, &gang->aff_list_head); + } + gang->aff_flags |= AFF_MERGED; +} + +static void aff_set_offsets(struct spu_gang *gang) +{ + struct spu_context *ctx; + int offset; + + offset = -1; + list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list, + aff_list) { + if (&ctx->aff_list == &gang->aff_list_head) + break; + ctx->aff_offset = offset--; + } + + offset = 0; + list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) { + if (&ctx->aff_list == &gang->aff_list_head) + break; + ctx->aff_offset = offset++; + } + + gang->aff_flags |= AFF_OFFSETS_SET; +} + +static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff, + int group_size, int lowest_offset) +{ + struct spu *spu; + int node, n; + + /* + * TODO: A better algorithm could be used to find a good spu to be + * used as reference location for the ctxs chain. + */ + node = cpu_to_node(raw_smp_processor_id()); + for (n = 0; n < MAX_NUMNODES; n++, node++) { + /* + * "available_spus" counts how many spus are not potentially + * going to be used by other affinity gangs whose reference + * context is already in place. Although this code seeks to + * avoid having affinity gangs with a summed amount of + * contexts bigger than the amount of spus in the node, + * this may happen sporadically. In this case, available_spus + * becomes negative, which is harmless. + */ + int available_spus; + + node = (node < MAX_NUMNODES) ? 
node : 0; + if (!node_allowed(ctx, node)) + continue; + + available_spus = 0; + mutex_lock(&cbe_spu_info[node].list_mutex); + list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { + if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset + && spu->ctx->gang->aff_ref_spu) + available_spus -= spu->ctx->gang->contexts; + available_spus++; + } + if (available_spus < ctx->gang->contexts) { + mutex_unlock(&cbe_spu_info[node].list_mutex); + continue; + } + + list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { + if ((!mem_aff || spu->has_mem_affinity) && + sched_spu(spu)) { + mutex_unlock(&cbe_spu_info[node].list_mutex); + return spu; + } + } + mutex_unlock(&cbe_spu_info[node].list_mutex); + } + return NULL; +} + +static void aff_set_ref_point_location(struct spu_gang *gang) +{ + int mem_aff, gs, lowest_offset; + struct spu_context *tmp, *ctx; + + mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM; + lowest_offset = 0; + gs = 0; + + list_for_each_entry(tmp, &gang->aff_list_head, aff_list) + gs++; + + list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list, + aff_list) { + if (&ctx->aff_list == &gang->aff_list_head) + break; + lowest_offset = ctx->aff_offset; + } + + gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs, + lowest_offset); +} + +static struct spu *ctx_location(struct spu *ref, int offset, int node) +{ + struct spu *spu; + + spu = NULL; + if (offset >= 0) { + list_for_each_entry(spu, ref->aff_list.prev, aff_list) { + BUG_ON(spu->node != node); + if (offset == 0) + break; + if (sched_spu(spu)) + offset--; + } + } else { + list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) { + BUG_ON(spu->node != node); + if (offset == 0) + break; + if (sched_spu(spu)) + offset++; + } + } + + return spu; +} + +/* + * affinity_check is called each time a context is going to be scheduled. + * It returns the spu ptr on which the context must run. + */ +static int has_affinity(struct spu_context *ctx) +{ + struct spu_gang *gang = ctx->gang; + + if (list_empty(&ctx->aff_list)) + return 0; + + if (atomic_read(&ctx->gang->aff_sched_count) == 0) + ctx->gang->aff_ref_spu = NULL; + + if (!gang->aff_ref_spu) { + if (!(gang->aff_flags & AFF_MERGED)) + aff_merge_remaining_ctxs(gang); + if (!(gang->aff_flags & AFF_OFFSETS_SET)) + aff_set_offsets(gang); + aff_set_ref_point_location(gang); + } + + return gang->aff_ref_spu != NULL; +} + +/** + * spu_unbind_context - unbind spu context from physical spu + * @spu: physical spu to unbind from + * @ctx: context to unbind + */ +static void spu_unbind_context(struct spu *spu, struct spu_context *ctx) +{ + u32 status; + + spu_context_trace(spu_unbind_context__enter, ctx, spu); + + spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); + + if (spu->ctx->flags & SPU_CREATE_NOSCHED) + atomic_dec(&cbe_spu_info[spu->node].reserved_spus); + + if (ctx->gang) + /* + * If ctx->gang->aff_sched_count is positive, SPU affinity is + * being considered in this gang. 
Using atomic_dec_if_positive + * allow us to skip an explicit check for affinity in this gang + */ + atomic_dec_if_positive(&ctx->gang->aff_sched_count); + + spu_unmap_mappings(ctx); + spu_save(&ctx->csa, spu); + spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0); + + spin_lock_irq(&spu->register_lock); + spu->timestamp = jiffies; + ctx->state = SPU_STATE_SAVED; + spu->ibox_callback = NULL; + spu->wbox_callback = NULL; + spu->stop_callback = NULL; + spu->mfc_callback = NULL; + spu->pid = 0; + spu->tgid = 0; + ctx->ops = &spu_backing_ops; + spu->flags = 0; + spu->ctx = NULL; + spin_unlock_irq(&spu->register_lock); + + spu_associate_mm(spu, NULL); + + ctx->stats.slb_flt += + (spu->stats.slb_flt - ctx->stats.slb_flt_base); + ctx->stats.class2_intr += + (spu->stats.class2_intr - ctx->stats.class2_intr_base); + + /* This maps the underlying spu state to idle */ + spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED); + ctx->spu = NULL; + + if (spu_stopped(ctx, &status)) + wake_up_all(&ctx->stop_wq); +} + +/** + * spu_add_to_rq - add a context to the runqueue + * @ctx: context to add + */ +static void __spu_add_to_rq(struct spu_context *ctx) +{ + /* + * Unfortunately this code path can be called from multiple threads + * on behalf of a single context due to the way the problem state + * mmap support works. + * + * Fortunately we need to wake up all these threads at the same time + * and can simply skip the runqueue addition for every but the first + * thread getting into this codepath. + * + * It's still quite hacky, and long-term we should proxy all other + * threads through the owner thread so that spu_run is in control + * of all the scheduling activity for a given context. + */ + if (list_empty(&ctx->rq)) { + list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]); + set_bit(ctx->prio, spu_prio->bitmap); + if (!spu_prio->nr_waiting++) + mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK); + } +} + +static void spu_add_to_rq(struct spu_context *ctx) +{ + spin_lock(&spu_prio->runq_lock); + __spu_add_to_rq(ctx); + spin_unlock(&spu_prio->runq_lock); +} + +static void __spu_del_from_rq(struct spu_context *ctx) +{ + int prio = ctx->prio; + + if (!list_empty(&ctx->rq)) { + if (!--spu_prio->nr_waiting) + del_timer(&spusched_timer); + list_del_init(&ctx->rq); + + if (list_empty(&spu_prio->runq[prio])) + clear_bit(prio, spu_prio->bitmap); + } +} + +void spu_del_from_rq(struct spu_context *ctx) +{ + spin_lock(&spu_prio->runq_lock); + __spu_del_from_rq(ctx); + spin_unlock(&spu_prio->runq_lock); +} + +static void spu_prio_wait(struct spu_context *ctx) +{ + DEFINE_WAIT(wait); + + /* + * The caller must explicitly wait for a context to be loaded + * if the nosched flag is set. If NOSCHED is not set, the caller + * queues the context and waits for an spu event or error. 
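+	 * spu_prio_wait() itself is only called for NOSCHED contexts,
+	 * which the BUG_ON below asserts.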
+ */ + BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED)); + + spin_lock(&spu_prio->runq_lock); + prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE); + if (!signal_pending(current)) { + __spu_add_to_rq(ctx); + spin_unlock(&spu_prio->runq_lock); + mutex_unlock(&ctx->state_mutex); + schedule(); + mutex_lock(&ctx->state_mutex); + spin_lock(&spu_prio->runq_lock); + __spu_del_from_rq(ctx); + } + spin_unlock(&spu_prio->runq_lock); + __set_current_state(TASK_RUNNING); + remove_wait_queue(&ctx->stop_wq, &wait); +} + +static struct spu *spu_get_idle(struct spu_context *ctx) +{ + struct spu *spu, *aff_ref_spu; + int node, n; + + spu_context_nospu_trace(spu_get_idle__enter, ctx); + + if (ctx->gang) { + mutex_lock(&ctx->gang->aff_mutex); + if (has_affinity(ctx)) { + aff_ref_spu = ctx->gang->aff_ref_spu; + atomic_inc(&ctx->gang->aff_sched_count); + mutex_unlock(&ctx->gang->aff_mutex); + node = aff_ref_spu->node; + + mutex_lock(&cbe_spu_info[node].list_mutex); + spu = ctx_location(aff_ref_spu, ctx->aff_offset, node); + if (spu && spu->alloc_state == SPU_FREE) + goto found; + mutex_unlock(&cbe_spu_info[node].list_mutex); + + atomic_dec(&ctx->gang->aff_sched_count); + goto not_found; + } + mutex_unlock(&ctx->gang->aff_mutex); + } + node = cpu_to_node(raw_smp_processor_id()); + for (n = 0; n < MAX_NUMNODES; n++, node++) { + node = (node < MAX_NUMNODES) ? node : 0; + if (!node_allowed(ctx, node)) + continue; + + mutex_lock(&cbe_spu_info[node].list_mutex); + list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { + if (spu->alloc_state == SPU_FREE) + goto found; + } + mutex_unlock(&cbe_spu_info[node].list_mutex); + } + + not_found: + spu_context_nospu_trace(spu_get_idle__not_found, ctx); + return NULL; + + found: + spu->alloc_state = SPU_USED; + mutex_unlock(&cbe_spu_info[node].list_mutex); + spu_context_trace(spu_get_idle__found, ctx, spu); + spu_init_channels(spu); + return spu; +} + +/** + * find_victim - find a lower priority context to preempt + * @ctx: candidate context for running + * + * Returns the freed physical spu to run the new context on. + */ +static struct spu *find_victim(struct spu_context *ctx) +{ + struct spu_context *victim = NULL; + struct spu *spu; + int node, n; + + spu_context_nospu_trace(spu_find_victim__enter, ctx); + + /* + * Look for a possible preemption candidate on the local node first. + * If there is no candidate look at the other nodes. This isn't + * exactly fair, but so far the whole spu scheduler tries to keep + * a strong node affinity. We might want to fine-tune this in + * the future. + */ + restart: + node = cpu_to_node(raw_smp_processor_id()); + for (n = 0; n < MAX_NUMNODES; n++, node++) { + node = (node < MAX_NUMNODES) ? node : 0; + if (!node_allowed(ctx, node)) + continue; + + mutex_lock(&cbe_spu_info[node].list_mutex); + list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { + struct spu_context *tmp = spu->ctx; + + if (tmp && tmp->prio > ctx->prio && + !(tmp->flags & SPU_CREATE_NOSCHED) && + (!victim || tmp->prio > victim->prio)) { + victim = spu->ctx; + } + } + if (victim) + get_spu_context(victim); + mutex_unlock(&cbe_spu_info[node].list_mutex); + + if (victim) { + /* + * This nests ctx->state_mutex, but we always lock + * higher priority contexts before lower priority + * ones, so this is safe until we introduce + * priority inheritance schemes. + * + * XXX if the highest priority context is locked, + * this can loop a long time. Might be better to + * look at another context or give up after X retries. 
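+			 * (mutex_trylock() below avoids blocking on the
+			 * victim's state_mutex; if it fails we drop the
+			 * reference and restart the search.)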
+ */ + if (!mutex_trylock(&victim->state_mutex)) { + put_spu_context(victim); + victim = NULL; + goto restart; + } + + spu = victim->spu; + if (!spu || victim->prio <= ctx->prio) { + /* + * This race can happen because we've dropped + * the active list mutex. Not a problem, just + * restart the search. + */ + mutex_unlock(&victim->state_mutex); + put_spu_context(victim); + victim = NULL; + goto restart; + } + + spu_context_trace(__spu_deactivate__unload, ctx, spu); + + mutex_lock(&cbe_spu_info[node].list_mutex); + cbe_spu_info[node].nr_active--; + spu_unbind_context(spu, victim); + mutex_unlock(&cbe_spu_info[node].list_mutex); + + victim->stats.invol_ctx_switch++; + spu->stats.invol_ctx_switch++; + if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags)) + spu_add_to_rq(victim); + + mutex_unlock(&victim->state_mutex); + put_spu_context(victim); + + return spu; + } + } + + return NULL; +} + +static void __spu_schedule(struct spu *spu, struct spu_context *ctx) +{ + int node = spu->node; + int success = 0; + + spu_set_timeslice(ctx); + + mutex_lock(&cbe_spu_info[node].list_mutex); + if (spu->ctx == NULL) { + spu_bind_context(spu, ctx); + cbe_spu_info[node].nr_active++; + spu->alloc_state = SPU_USED; + success = 1; + } + mutex_unlock(&cbe_spu_info[node].list_mutex); + + if (success) + wake_up_all(&ctx->run_wq); + else + spu_add_to_rq(ctx); +} + +static void spu_schedule(struct spu *spu, struct spu_context *ctx) +{ + /* not a candidate for interruptible because it's called either + from the scheduler thread or from spu_deactivate */ + mutex_lock(&ctx->state_mutex); + if (ctx->state == SPU_STATE_SAVED) + __spu_schedule(spu, ctx); + spu_release(ctx); +} + +/** + * spu_unschedule - remove a context from a spu, and possibly release it. + * @spu: The SPU to unschedule from + * @ctx: The context currently scheduled on the SPU + * @free_spu Whether to free the SPU for other contexts + * + * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the + * SPU is made available for other contexts (ie, may be returned by + * spu_get_idle). If this is zero, the caller is expected to schedule another + * context to this spu. + * + * Should be called with ctx->state_mutex held. + */ +static void spu_unschedule(struct spu *spu, struct spu_context *ctx, + int free_spu) +{ + int node = spu->node; + + mutex_lock(&cbe_spu_info[node].list_mutex); + cbe_spu_info[node].nr_active--; + if (free_spu) + spu->alloc_state = SPU_FREE; + spu_unbind_context(spu, ctx); + ctx->stats.invol_ctx_switch++; + spu->stats.invol_ctx_switch++; + mutex_unlock(&cbe_spu_info[node].list_mutex); +} + +/** + * spu_activate - find a free spu for a context and execute it + * @ctx: spu context to schedule + * @flags: flags (currently ignored) + * + * Tries to find a free spu to run @ctx. If no free spu is available + * add the context to the runqueue so it gets woken up once an spu + * is available. + */ +int spu_activate(struct spu_context *ctx, unsigned long flags) +{ + struct spu *spu; + + /* + * If there are multiple threads waiting for a single context + * only one actually binds the context while the others will + * only be able to acquire the state_mutex once the context + * already is in runnable state. + */ + if (ctx->spu) + return 0; + +spu_activate_top: + if (signal_pending(current)) + return -ERESTARTSYS; + + spu = spu_get_idle(ctx); + /* + * If this is a realtime thread we try to get it running by + * preempting a lower priority thread. 
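+	 * find_victim() looks for a lower-priority, non-NOSCHED context
+	 * to unbind and returns its spu.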
+ */ + if (!spu && rt_prio(ctx->prio)) + spu = find_victim(ctx); + if (spu) { + unsigned long runcntl; + + runcntl = ctx->ops->runcntl_read(ctx); + __spu_schedule(spu, ctx); + if (runcntl & SPU_RUNCNTL_RUNNABLE) + spuctx_switch_state(ctx, SPU_UTIL_USER); + + return 0; + } + + if (ctx->flags & SPU_CREATE_NOSCHED) { + spu_prio_wait(ctx); + goto spu_activate_top; + } + + spu_add_to_rq(ctx); + + return 0; +} + +/** + * grab_runnable_context - try to find a runnable context + * + * Remove the highest priority context on the runqueue and return it + * to the caller. Returns %NULL if no runnable context was found. + */ +static struct spu_context *grab_runnable_context(int prio, int node) +{ + struct spu_context *ctx; + int best; + + spin_lock(&spu_prio->runq_lock); + best = find_first_bit(spu_prio->bitmap, prio); + while (best < prio) { + struct list_head *rq = &spu_prio->runq[best]; + + list_for_each_entry(ctx, rq, rq) { + /* XXX(hch): check for affinity here as well */ + if (__node_allowed(ctx, node)) { + __spu_del_from_rq(ctx); + goto found; + } + } + best++; + } + ctx = NULL; + found: + spin_unlock(&spu_prio->runq_lock); + return ctx; +} + +static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio) +{ + struct spu *spu = ctx->spu; + struct spu_context *new = NULL; + + if (spu) { + new = grab_runnable_context(max_prio, spu->node); + if (new || force) { + spu_unschedule(spu, ctx, new == NULL); + if (new) { + if (new->flags & SPU_CREATE_NOSCHED) + wake_up(&new->stop_wq); + else { + spu_release(ctx); + spu_schedule(spu, new); + /* this one can't easily be made + interruptible */ + mutex_lock(&ctx->state_mutex); + } + } + } + } + + return new != NULL; +} + +/** + * spu_deactivate - unbind a context from it's physical spu + * @ctx: spu context to unbind + * + * Unbind @ctx from the physical spu it is running on and schedule + * the highest priority context to run on the freed physical spu. + */ +void spu_deactivate(struct spu_context *ctx) +{ + spu_context_nospu_trace(spu_deactivate__enter, ctx); + __spu_deactivate(ctx, 1, MAX_PRIO); +} + +/** + * spu_yield - yield a physical spu if others are waiting + * @ctx: spu context to yield + * + * Check if there is a higher priority context waiting and if yes + * unbind @ctx from the physical spu and schedule the highest + * priority context to run on the freed physical spu instead. 
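+ * (Contexts created with SPU_CREATE_NOSCHED are never yielded; the call
+ * is a no-op for them.)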
+ */ +void spu_yield(struct spu_context *ctx) +{ + spu_context_nospu_trace(spu_yield__enter, ctx); + if (!(ctx->flags & SPU_CREATE_NOSCHED)) { + mutex_lock(&ctx->state_mutex); + __spu_deactivate(ctx, 0, MAX_PRIO); + mutex_unlock(&ctx->state_mutex); + } +} + +static noinline void spusched_tick(struct spu_context *ctx) +{ + struct spu_context *new = NULL; + struct spu *spu = NULL; + + if (spu_acquire(ctx)) + BUG(); /* a kernel thread never has signals pending */ + + if (ctx->state != SPU_STATE_RUNNABLE) + goto out; + if (ctx->flags & SPU_CREATE_NOSCHED) + goto out; + if (ctx->policy == SCHED_FIFO) + goto out; + + if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) + goto out; + + spu = ctx->spu; + + spu_context_trace(spusched_tick__preempt, ctx, spu); + + new = grab_runnable_context(ctx->prio + 1, spu->node); + if (new) { + spu_unschedule(spu, ctx, 0); + if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) + spu_add_to_rq(ctx); + } else { + spu_context_nospu_trace(spusched_tick__newslice, ctx); + if (!ctx->time_slice) + ctx->time_slice++; + } +out: + spu_release(ctx); + + if (new) + spu_schedule(spu, new); +} + +/** + * count_active_contexts - count nr of active tasks + * + * Return the number of tasks currently running or waiting to run. + * + * Note that we don't take runq_lock / list_mutex here. Reading + * a single 32bit value is atomic on powerpc, and we don't care + * about memory ordering issues here. + */ +static unsigned long count_active_contexts(void) +{ + int nr_active = 0, node; + + for (node = 0; node < MAX_NUMNODES; node++) + nr_active += cbe_spu_info[node].nr_active; + nr_active += spu_prio->nr_waiting; + + return nr_active; +} + +/** + * spu_calc_load - update the avenrun load estimates. + * + * No locking against reading these values from userspace, as for + * the CPU loadavg code. 
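+ * The averages use the same fixed-point format as the CPU load average
+ * (FIXED_1 == 1.0) and are refreshed every LOAD_FREQ jiffies by
+ * spuloadavg_wake().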
+ */ +static void spu_calc_load(void) +{ + unsigned long active_tasks; /* fixed-point */ + + active_tasks = count_active_contexts() * FIXED_1; + spu_avenrun[0] = calc_load(spu_avenrun[0], EXP_1, active_tasks); + spu_avenrun[1] = calc_load(spu_avenrun[1], EXP_5, active_tasks); + spu_avenrun[2] = calc_load(spu_avenrun[2], EXP_15, active_tasks); +} + +static void spusched_wake(struct timer_list *unused) +{ + mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK); + wake_up_process(spusched_task); +} + +static void spuloadavg_wake(struct timer_list *unused) +{ + mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ); + spu_calc_load(); +} + +static int spusched_thread(void *unused) +{ + struct spu *spu; + int node; + + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + for (node = 0; node < MAX_NUMNODES; node++) { + struct mutex *mtx = &cbe_spu_info[node].list_mutex; + + mutex_lock(mtx); + list_for_each_entry(spu, &cbe_spu_info[node].spus, + cbe_list) { + struct spu_context *ctx = spu->ctx; + + if (ctx) { + get_spu_context(ctx); + mutex_unlock(mtx); + spusched_tick(ctx); + mutex_lock(mtx); + put_spu_context(ctx); + } + } + mutex_unlock(mtx); + } + } + + return 0; +} + +void spuctx_switch_state(struct spu_context *ctx, + enum spu_utilization_state new_state) +{ + unsigned long long curtime; + signed long long delta; + struct spu *spu; + enum spu_utilization_state old_state; + int node; + + curtime = ktime_get_ns(); + delta = curtime - ctx->stats.tstamp; + + WARN_ON(!mutex_is_locked(&ctx->state_mutex)); + WARN_ON(delta < 0); + + spu = ctx->spu; + old_state = ctx->stats.util_state; + ctx->stats.util_state = new_state; + ctx->stats.tstamp = curtime; + + /* + * Update the physical SPU utilization statistics. + */ + if (spu) { + ctx->stats.times[old_state] += delta; + spu->stats.times[old_state] += delta; + spu->stats.util_state = new_state; + spu->stats.tstamp = curtime; + node = spu->node; + if (old_state == SPU_UTIL_USER) + atomic_dec(&cbe_spu_info[node].busy_spus); + if (new_state == SPU_UTIL_USER) + atomic_inc(&cbe_spu_info[node].busy_spus); + } +} + +#ifdef CONFIG_PROC_FS +static int show_spu_loadavg(struct seq_file *s, void *private) +{ + int a, b, c; + + a = spu_avenrun[0] + (FIXED_1/200); + b = spu_avenrun[1] + (FIXED_1/200); + c = spu_avenrun[2] + (FIXED_1/200); + + /* + * Note that last_pid doesn't really make much sense for the + * SPU loadavg (it even seems very odd on the CPU side...), + * but we include it here to have a 100% compatible interface. 
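+	 * (The FIXED_1/200 added above rounds each average to the nearest
+	 * hundredth before LOAD_INT/LOAD_FRAC split it for printing, just
+	 * as /proc/loadavg does.)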
+ */ + seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n", + LOAD_INT(a), LOAD_FRAC(a), + LOAD_INT(b), LOAD_FRAC(b), + LOAD_INT(c), LOAD_FRAC(c), + count_active_contexts(), + atomic_read(&nr_spu_contexts), + idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); + return 0; +} +#endif + +int __init spu_sched_init(void) +{ + struct proc_dir_entry *entry; + int err = -ENOMEM, i; + + spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL); + if (!spu_prio) + goto out; + + for (i = 0; i < MAX_PRIO; i++) { + INIT_LIST_HEAD(&spu_prio->runq[i]); + __clear_bit(i, spu_prio->bitmap); + } + spin_lock_init(&spu_prio->runq_lock); + + timer_setup(&spusched_timer, spusched_wake, 0); + timer_setup(&spuloadavg_timer, spuloadavg_wake, 0); + + spusched_task = kthread_run(spusched_thread, NULL, "spusched"); + if (IS_ERR(spusched_task)) { + err = PTR_ERR(spusched_task); + goto out_free_spu_prio; + } + + mod_timer(&spuloadavg_timer, 0); + + entry = proc_create_single("spu_loadavg", 0, NULL, show_spu_loadavg); + if (!entry) + goto out_stop_kthread; + + pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n", + SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE); + return 0; + + out_stop_kthread: + kthread_stop(spusched_task); + out_free_spu_prio: + kfree(spu_prio); + out: + return err; +} + +void spu_sched_exit(void) +{ + struct spu *spu; + int node; + + remove_proc_entry("spu_loadavg", NULL); + + del_timer_sync(&spusched_timer); + del_timer_sync(&spuloadavg_timer); + kthread_stop(spusched_task); + + for (node = 0; node < MAX_NUMNODES; node++) { + mutex_lock(&cbe_spu_info[node].list_mutex); + list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) + if (spu->alloc_state != SPU_FREE) + spu->alloc_state = SPU_FREE; + mutex_unlock(&cbe_spu_info[node].list_mutex); + } + kfree(spu_prio); +} diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore.c b/arch/powerpc/platforms/cell/spufs/spu_restore.c new file mode 100644 index 0000000000..2cbb6efb2d --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/spu_restore.c @@ -0,0 +1,322 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * spu_restore.c + * + * (C) Copyright IBM Corp. 
2005 + * + * SPU-side context restore sequence outlined in + * Synergistic Processor Element Book IV + * + * Author: Mark Nutter + */ + + +#ifndef LS_SIZE +#define LS_SIZE 0x40000 /* 256K (in bytes) */ +#endif + +typedef unsigned int u32; +typedef unsigned long long u64; + +#include +#include +#include "spu_utils.h" + +#define BR_INSTR 0x327fff80 /* br -4 */ +#define NOP_INSTR 0x40200000 /* nop */ +#define HEQ_INSTR 0x7b000000 /* heq $0, $0 */ +#define STOP_INSTR 0x00000000 /* stop 0x0 */ +#define ILLEGAL_INSTR 0x00800000 /* illegal instr */ +#define RESTORE_COMPLETE 0x00003ffc /* stop 0x3ffc */ + +static inline void fetch_regs_from_mem(addr64 lscsa_ea) +{ + unsigned int ls = (unsigned int)®s_spill[0]; + unsigned int size = sizeof(regs_spill); + unsigned int tag_id = 0; + unsigned int cmd = 0x40; /* GET */ + + spu_writech(MFC_LSA, ls); + spu_writech(MFC_EAH, lscsa_ea.ui[0]); + spu_writech(MFC_EAL, lscsa_ea.ui[1]); + spu_writech(MFC_Size, size); + spu_writech(MFC_TagID, tag_id); + spu_writech(MFC_Cmd, cmd); +} + +static inline void restore_upper_240kb(addr64 lscsa_ea) +{ + unsigned int ls = 16384; + unsigned int list = (unsigned int)&dma_list[0]; + unsigned int size = sizeof(dma_list); + unsigned int tag_id = 0; + unsigned int cmd = 0x44; /* GETL */ + + /* Restore, Step 4: + * Enqueue the GETL command (tag 0) to the MFC SPU command + * queue to transfer the upper 240 kb of LS from CSA. + */ + spu_writech(MFC_LSA, ls); + spu_writech(MFC_EAH, lscsa_ea.ui[0]); + spu_writech(MFC_EAL, list); + spu_writech(MFC_Size, size); + spu_writech(MFC_TagID, tag_id); + spu_writech(MFC_Cmd, cmd); +} + +static inline void restore_decr(void) +{ + unsigned int offset; + unsigned int decr_running; + unsigned int decr; + + /* Restore, Step 6(moved): + * If the LSCSA "decrementer running" flag is set + * then write the SPU_WrDec channel with the + * decrementer value from LSCSA. + */ + offset = LSCSA_QW_OFFSET(decr_status); + decr_running = regs_spill[offset].slot[0] & SPU_DECR_STATUS_RUNNING; + if (decr_running) { + offset = LSCSA_QW_OFFSET(decr); + decr = regs_spill[offset].slot[0]; + spu_writech(SPU_WrDec, decr); + } +} + +static inline void write_ppu_mb(void) +{ + unsigned int offset; + unsigned int data; + + /* Restore, Step 11: + * Write the MFC_WrOut_MB channel with the PPU_MB + * data from LSCSA. + */ + offset = LSCSA_QW_OFFSET(ppu_mb); + data = regs_spill[offset].slot[0]; + spu_writech(SPU_WrOutMbox, data); +} + +static inline void write_ppuint_mb(void) +{ + unsigned int offset; + unsigned int data; + + /* Restore, Step 12: + * Write the MFC_WrInt_MB channel with the PPUINT_MB + * data from LSCSA. + */ + offset = LSCSA_QW_OFFSET(ppuint_mb); + data = regs_spill[offset].slot[0]; + spu_writech(SPU_WrOutIntrMbox, data); +} + +static inline void restore_fpcr(void) +{ + unsigned int offset; + vector unsigned int fpcr; + + /* Restore, Step 13: + * Restore the floating-point status and control + * register from the LSCSA. + */ + offset = LSCSA_QW_OFFSET(fpcr); + fpcr = regs_spill[offset].v; + spu_mtfpscr(fpcr); +} + +static inline void restore_srr0(void) +{ + unsigned int offset; + unsigned int srr0; + + /* Restore, Step 14: + * Restore the SPU SRR0 data from the LSCSA. + */ + offset = LSCSA_QW_OFFSET(srr0); + srr0 = regs_spill[offset].slot[0]; + spu_writech(SPU_WrSRR0, srr0); +} + +static inline void restore_event_mask(void) +{ + unsigned int offset; + unsigned int event_mask; + + /* Restore, Step 15: + * Restore the SPU_RdEventMsk data from the LSCSA. 
+ */ + offset = LSCSA_QW_OFFSET(event_mask); + event_mask = regs_spill[offset].slot[0]; + spu_writech(SPU_WrEventMask, event_mask); +} + +static inline void restore_tag_mask(void) +{ + unsigned int offset; + unsigned int tag_mask; + + /* Restore, Step 16: + * Restore the SPU_RdTagMsk data from the LSCSA. + */ + offset = LSCSA_QW_OFFSET(tag_mask); + tag_mask = regs_spill[offset].slot[0]; + spu_writech(MFC_WrTagMask, tag_mask); +} + +static inline void restore_complete(void) +{ + extern void exit_fini(void); + unsigned int *exit_instrs = (unsigned int *)exit_fini; + unsigned int offset; + unsigned int stopped_status; + unsigned int stopped_code; + + /* Restore, Step 18: + * Issue a stop-and-signal instruction with + * "good context restore" signal value. + * + * Restore, Step 19: + * There may be additional instructions placed + * here by the PPE Sequence for SPU Context + * Restore in order to restore the correct + * "stopped state". + * + * This step is handled here by analyzing the + * LSCSA.stopped_status and then modifying the + * exit() function to behave appropriately. + */ + + offset = LSCSA_QW_OFFSET(stopped_status); + stopped_status = regs_spill[offset].slot[0]; + stopped_code = regs_spill[offset].slot[1]; + + switch (stopped_status) { + case SPU_STOPPED_STATUS_P_I: + /* SPU_Status[P,I]=1. Add illegal instruction + * followed by stop-and-signal instruction after + * end of restore code. + */ + exit_instrs[0] = RESTORE_COMPLETE; + exit_instrs[1] = ILLEGAL_INSTR; + exit_instrs[2] = STOP_INSTR | stopped_code; + break; + case SPU_STOPPED_STATUS_P_H: + /* SPU_Status[P,H]=1. Add 'heq $0, $0' followed + * by stop-and-signal instruction after end of + * restore code. + */ + exit_instrs[0] = RESTORE_COMPLETE; + exit_instrs[1] = HEQ_INSTR; + exit_instrs[2] = STOP_INSTR | stopped_code; + break; + case SPU_STOPPED_STATUS_S_P: + /* SPU_Status[S,P]=1. Add nop instruction + * followed by 'br -4' after end of restore + * code. + */ + exit_instrs[0] = RESTORE_COMPLETE; + exit_instrs[1] = STOP_INSTR | stopped_code; + exit_instrs[2] = NOP_INSTR; + exit_instrs[3] = BR_INSTR; + break; + case SPU_STOPPED_STATUS_S_I: + /* SPU_Status[S,I]=1. Add illegal instruction + * followed by 'br -4' after end of restore code. + */ + exit_instrs[0] = RESTORE_COMPLETE; + exit_instrs[1] = ILLEGAL_INSTR; + exit_instrs[2] = NOP_INSTR; + exit_instrs[3] = BR_INSTR; + break; + case SPU_STOPPED_STATUS_I: + /* SPU_Status[I]=1. Add illegal instruction followed + * by infinite loop after end of restore sequence. + */ + exit_instrs[0] = RESTORE_COMPLETE; + exit_instrs[1] = ILLEGAL_INSTR; + exit_instrs[2] = NOP_INSTR; + exit_instrs[3] = BR_INSTR; + break; + case SPU_STOPPED_STATUS_S: + /* SPU_Status[S]=1. Add two 'nop' instructions. */ + exit_instrs[0] = RESTORE_COMPLETE; + exit_instrs[1] = NOP_INSTR; + exit_instrs[2] = NOP_INSTR; + exit_instrs[3] = BR_INSTR; + break; + case SPU_STOPPED_STATUS_H: + /* SPU_Status[H]=1. Add 'heq $0, $0' instruction + * after end of restore code. + */ + exit_instrs[0] = RESTORE_COMPLETE; + exit_instrs[1] = HEQ_INSTR; + exit_instrs[2] = NOP_INSTR; + exit_instrs[3] = BR_INSTR; + break; + case SPU_STOPPED_STATUS_P: + /* SPU_Status[P]=1. Add stop-and-signal instruction + * after end of restore code. + */ + exit_instrs[0] = RESTORE_COMPLETE; + exit_instrs[1] = STOP_INSTR | stopped_code; + break; + case SPU_STOPPED_STATUS_R: + /* SPU_Status[I,S,H,P,R]=0. Add infinite loop. 
*/ + exit_instrs[0] = RESTORE_COMPLETE; + exit_instrs[1] = NOP_INSTR; + exit_instrs[2] = NOP_INSTR; + exit_instrs[3] = BR_INSTR; + break; + default: + /* SPU_Status[R]=1. No additional instructions. */ + break; + } + spu_sync(); +} + +/** + * main - entry point for SPU-side context restore. + * + * This code deviates from the documented sequence in the + * following aspects: + * + * 1. The EA for LSCSA is passed from PPE in the + * signal notification channels. + * 2. The register spill area is pulled by SPU + * into LS, rather than pushed by PPE. + * 3. All 128 registers are restored by exit(). + * 4. The exit() function is modified at run + * time in order to properly restore the + * SPU_Status register. + */ +int main() +{ + addr64 lscsa_ea; + + lscsa_ea.ui[0] = spu_readch(SPU_RdSigNotify1); + lscsa_ea.ui[1] = spu_readch(SPU_RdSigNotify2); + fetch_regs_from_mem(lscsa_ea); + + set_event_mask(); /* Step 1. */ + set_tag_mask(); /* Step 2. */ + build_dma_list(lscsa_ea); /* Step 3. */ + restore_upper_240kb(lscsa_ea); /* Step 4. */ + /* Step 5: done by 'exit'. */ + enqueue_putllc(lscsa_ea); /* Step 7. */ + set_tag_update(); /* Step 8. */ + read_tag_status(); /* Step 9. */ + restore_decr(); /* moved Step 6. */ + read_llar_status(); /* Step 10. */ + write_ppu_mb(); /* Step 11. */ + write_ppuint_mb(); /* Step 12. */ + restore_fpcr(); /* Step 13. */ + restore_srr0(); /* Step 14. */ + restore_event_mask(); /* Step 15. */ + restore_tag_mask(); /* Step 16. */ + /* Step 17. done by 'exit'. */ + restore_complete(); /* Step 18. */ + + return 0; +} diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore_crt0.S b/arch/powerpc/platforms/cell/spufs/spu_restore_crt0.S new file mode 100644 index 0000000000..6d799f8476 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/spu_restore_crt0.S @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * crt0_r.S: Entry function for SPU-side context restore. + * + * Copyright (C) 2005 IBM + * + * Entry and exit function for SPU-side of the context restore + * sequence. Sets up an initial stack frame, then branches to + * 'main'. On return, restores all 128 registers from the LSCSA + * and exits. + */ + +#include + +.data +.align 7 +.globl regs_spill +regs_spill: +.space SIZEOF_SPU_SPILL_REGS, 0x0 + +.text +.global _start +_start: + /* Initialize the stack pointer to point to 16368 + * (16kb-16). The back chain pointer is initialized + * to NULL. + */ + il $0, 0 + il $SP, 16368 + stqd $0, 0($SP) + + /* Allocate a minimum stack frame for the called main. + * This is needed so that main has a place to save the + * link register when it calls another function. + */ + stqd $SP, -160($SP) + ai $SP, $SP, -160 + + /* Call the program's main function. */ + brsl $0, main + +.global exit +.global _exit +exit: +_exit: + /* SPU Context Restore, Step 5: Restore the remaining 112 GPRs. */ + ila $3, regs_spill + 256 +restore_regs: + lqr $4, restore_reg_insts +restore_reg_loop: + ai $4, $4, 4 + .balignl 16, 0x40200000 +restore_reg_insts: /* must be quad-word aligned. */ + lqd $16, 0($3) + lqd $17, 16($3) + lqd $18, 32($3) + lqd $19, 48($3) + andi $5, $4, 0x7F + stqr $4, restore_reg_insts + ai $3, $3, 64 + brnz $5, restore_reg_loop + + /* SPU Context Restore Step 17: Restore the first 16 GPRs. 
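+	 * The first 16 registers occupy the first 256 bytes of regs_spill,
+	 * one quadword (16 bytes) per register.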
*/ + lqa $0, regs_spill + 0 + lqa $1, regs_spill + 16 + lqa $2, regs_spill + 32 + lqa $3, regs_spill + 48 + lqa $4, regs_spill + 64 + lqa $5, regs_spill + 80 + lqa $6, regs_spill + 96 + lqa $7, regs_spill + 112 + lqa $8, regs_spill + 128 + lqa $9, regs_spill + 144 + lqa $10, regs_spill + 160 + lqa $11, regs_spill + 176 + lqa $12, regs_spill + 192 + lqa $13, regs_spill + 208 + lqa $14, regs_spill + 224 + lqa $15, regs_spill + 240 + + /* Under normal circumstances, the 'exit' function + * terminates with 'stop SPU_RESTORE_COMPLETE', + * indicating that the SPU-side restore code has + * completed. + * + * However it is possible that instructions immediately + * following the 'stop 0x3ffc' have been modified at run + * time so as to recreate the exact SPU_Status settings + * from the application, e.g. illegal instruciton, halt, + * etc. + */ +.global exit_fini +.global _exit_fini +exit_fini: +_exit_fini: + stop SPU_RESTORE_COMPLETE + stop 0 + stop 0 + stop 0 + + /* Pad the size of this crt0.o to be multiple of 16 bytes. */ +.balignl 16, 0x0 diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped b/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped new file mode 100644 index 0000000000..f383b027e8 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped @@ -0,0 +1,935 @@ +/* + * spu_restore_dump.h: Copyright (C) 2005 IBM. + * Hex-dump auto generated from spu_restore.c. + * Do not edit! + */ +static unsigned int spu_restore_code[] __attribute__((__aligned__(128))) = { +0x40800000, +0x409ff801, +0x24000080, +0x24fd8081, +0x1cd80081, +0x33001180, +0x42034003, +0x33800284, +0x1c010204, +0x40200000, +0x40200000, +0x40200000, +0x34000190, +0x34004191, +0x34008192, +0x3400c193, +0x141fc205, +0x23fffd84, +0x1c100183, +0x217ffa85, +0x3080b000, +0x3080b201, +0x3080b402, +0x3080b603, +0x3080b804, +0x3080ba05, +0x3080bc06, +0x3080be07, +0x3080c008, +0x3080c209, +0x3080c40a, +0x3080c60b, +0x3080c80c, +0x3080ca0d, +0x3080cc0e, +0x3080ce0f, +0x00003ffc, +0x00000000, +0x00000000, +0x00000000, +0x01a00182, +0x3ec00083, +0xb0a14103, +0x01a00204, +0x3ec10083, +0x4202c002, +0xb0a14203, +0x21a00802, +0x3fbf028a, +0x3f20050a, +0x3fbe0502, +0x3fe30102, +0x21a00882, +0x3f82028b, +0x3fe3058b, +0x3fbf0584, +0x3f200204, +0x3fbe0204, +0x3fe30204, +0x04000203, +0x21a00903, +0x40848002, +0x21a00982, +0x40800003, +0x21a00a03, +0x40802002, +0x21a00a82, +0x21a00083, +0x40800082, +0x21a00b02, +0x10002612, +0x42a00003, +0x42074006, +0x1800c204, +0x40a00008, +0x40800789, +0x1c010305, +0x34000302, +0x1cffc489, +0x3ec00303, +0x3ec00287, +0xb0408403, +0x24000302, +0x34000282, +0x1c020306, +0xb0408207, +0x18020204, +0x24000282, +0x217ffa09, +0x04000402, +0x21a00802, +0x3fbe0504, +0x3fe30204, +0x21a00884, +0x42074002, +0x21a00902, +0x40803c03, +0x21a00983, +0x04000485, +0x21a00a05, +0x40802202, +0x21a00a82, +0x21a00805, +0x21a00884, +0x3fbf0582, +0x3f200102, +0x3fbe0102, +0x3fe30102, +0x21a00902, +0x40804003, +0x21a00983, +0x21a00a05, +0x40805a02, +0x21a00a82, +0x40800083, +0x21a00b83, +0x01a00c02, +0x30809c03, +0x34000182, +0x14004102, +0x21002082, +0x01a00d82, +0x3080a003, +0x34000182, +0x21a00e02, +0x3080a203, +0x34000182, +0x21a00f02, +0x3080a403, +0x34000182, +0x77400100, +0x3080a603, +0x34000182, +0x21a00702, +0x3080a803, +0x34000182, +0x21a00082, +0x3080aa03, +0x34000182, +0x21a00b02, +0x4020007f, +0x3080ae02, +0x42004805, +0x3080ac04, +0x34000103, +0x34000202, +0x1cffc183, +0x3b810106, +0x0f608184, +0x42013802, +0x5c020183, +0x38810102, +0x3b810102, +0x21000e83, +0x4020007f, 
+0x35000100, +0x00000470, +0x000002f8, +0x00000430, +0x00000360, +0x000002f8, +0x000003c8, +0x000004a8, +0x00000298, +0x00000360, +0x00200000, +0x409ffe02, +0x30801203, +0x40800208, +0x3ec40084, +0x40800407, +0x3ac20289, +0xb060c104, +0x3ac1c284, +0x20801203, +0x38820282, +0x41004003, +0xb0408189, +0x28820282, +0x3881c282, +0xb0408304, +0x2881c282, +0x00400000, +0x40800003, +0x35000000, +0x30809e03, +0x34000182, +0x21a00382, +0x4020007f, +0x327fde00, +0x409ffe02, +0x30801203, +0x40800206, +0x3ec40084, +0x40800407, +0x40800608, +0x3ac1828a, +0x3ac20289, +0xb060c104, +0x3ac1c284, +0x20801203, +0x38818282, +0x41004003, +0xb040818a, +0x10005b0b, +0x41201003, +0x28818282, +0x3881c282, +0xb0408184, +0x41193f83, +0x60ffc003, +0x2881c282, +0x38820282, +0xb0408189, +0x28820282, +0x327fef80, +0x409ffe02, +0x30801203, +0x40800207, +0x3ec40086, +0x4120100b, +0x10005b14, +0x40800404, +0x3ac1c289, +0x40800608, +0xb060c106, +0x3ac10286, +0x3ac2028a, +0x20801203, +0x3881c282, +0x41193f83, +0x60ffc003, +0xb0408589, +0x2881c282, +0x38810282, +0xb0408586, +0x28810282, +0x38820282, +0xb040818a, +0x28820282, +0x4020007f, +0x327fe280, +0x409ffe02, +0x30801203, +0x40800207, +0x3ec40084, +0x40800408, +0x10005b14, +0x40800609, +0x3ac1c28a, +0x3ac2028b, +0xb060c104, +0x3ac24284, +0x20801203, +0x41201003, +0x3881c282, +0xb040830a, +0x2881c282, +0x38820282, +0xb040818b, +0x41193f83, +0x60ffc003, +0x28820282, +0x38824282, +0xb0408184, +0x28824282, +0x4020007f, +0x327fd580, +0x409ffe02, +0x1000658e, +0x40800206, +0x30801203, +0x40800407, +0x3ec40084, +0x40800608, +0x3ac1828a, +0x3ac20289, +0xb060c104, +0x3ac1c284, +0x20801203, +0x413d8003, +0x38818282, +0x4020007f, +0x327fd800, +0x409ffe03, +0x30801202, +0x40800207, +0x3ec40084, +0x10005b09, +0x3ac1c288, +0xb0408184, +0x4020007f, +0x4020007f, +0x20801202, +0x3881c282, +0xb0408308, +0x2881c282, +0x327fc680, +0x409ffe02, +0x1000588b, +0x40800208, +0x30801203, +0x40800407, +0x3ec40084, +0x3ac20289, +0xb060c104, +0x3ac1c284, +0x20801203, +0x413d8003, +0x38820282, +0x327fbd80, +0x00200000, +0x00000da0, +0x00000000, +0x00000000, +0x00000000, +0x00000d90, +0x00000000, +0x00000000, +0x00000000, +0x00000db0, +0x00000000, +0x00000000, +0x00000000, +0x00000dc0, +0x00000000, +0x00000000, +0x00000000, +0x00000d80, +0x00000000, +0x00000000, +0x00000000, +0x00000df0, +0x00000000, +0x00000000, +0x00000000, +0x00000de0, +0x00000000, +0x00000000, +0x00000000, +0x00000dd0, +0x00000000, +0x00000000, +0x00000000, +0x00000e04, +0x00000000, +0x00000000, +0x00000000, +0x00000e00, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, 
+0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, 
+0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +}; diff --git a/arch/powerpc/platforms/cell/spufs/spu_save.c b/arch/powerpc/platforms/cell/spufs/spu_save.c new file mode 100644 index 0000000000..28c88e3243 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/spu_save.c @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * spu_save.c + * + * (C) Copyright IBM Corp. 
2005
+ *
+ * SPU-side context save sequence outlined in
+ * Synergistic Processor Element Book IV
+ *
+ * Author: Mark Nutter
+ */
+
+
+#ifndef LS_SIZE
+#define LS_SIZE 0x40000 /* 256K (in bytes) */
+#endif
+
+typedef unsigned int u32;
+typedef unsigned long long u64;
+
+#include
+#include
+#include "spu_utils.h"
+
+static inline void save_event_mask(void)
+{
+	unsigned int offset;
+
+	/* Save, Step 2:
+	 * Read the SPU_RdEventMsk channel and save to the LSCSA.
+	 */
+	offset = LSCSA_QW_OFFSET(event_mask);
+	regs_spill[offset].slot[0] = spu_readch(SPU_RdEventMask);
+}
+
+static inline void save_tag_mask(void)
+{
+	unsigned int offset;
+
+	/* Save, Step 3:
+	 * Read the SPU_RdTagMsk channel and save to the LSCSA.
+	 */
+	offset = LSCSA_QW_OFFSET(tag_mask);
+	regs_spill[offset].slot[0] = spu_readch(MFC_RdTagMask);
+}
+
+static inline void save_upper_240kb(addr64 lscsa_ea)
+{
+	unsigned int ls = 16384;
+	unsigned int list = (unsigned int)&dma_list[0];
+	unsigned int size = sizeof(dma_list);
+	unsigned int tag_id = 0;
+	unsigned int cmd = 0x24; /* PUTL */
+
+	/* Save, Step 7:
+	 * Enqueue the PUTL command (tag 0) to the MFC SPU command
+	 * queue to transfer the remaining 240 kb of LS to CSA.
+	 */
+	spu_writech(MFC_LSA, ls);
+	spu_writech(MFC_EAH, lscsa_ea.ui[0]);
+	spu_writech(MFC_EAL, list);
+	spu_writech(MFC_Size, size);
+	spu_writech(MFC_TagID, tag_id);
+	spu_writech(MFC_Cmd, cmd);
+}
+
+static inline void save_fpcr(void)
+{
+	// vector unsigned int fpcr;
+	unsigned int offset;
+
+	/* Save, Step 9:
+	 * Issue the floating-point status and control register
+	 * read instruction, and save to the LSCSA.
+	 */
+	offset = LSCSA_QW_OFFSET(fpcr);
+	regs_spill[offset].v = spu_mffpscr();
+}
+
+static inline void save_decr(void)
+{
+	unsigned int offset;
+
+	/* Save, Step 10:
+	 * Read and save the SPU_RdDec channel data to
+	 * the LSCSA.
+	 */
+	offset = LSCSA_QW_OFFSET(decr);
+	regs_spill[offset].slot[0] = spu_readch(SPU_RdDec);
+}
+
+static inline void save_srr0(void)
+{
+	unsigned int offset;
+
+	/* Save, Step 11:
+	 * Read and save the SPU_WSRR0 channel data to
+	 * the LSCSA.
+	 */
+	offset = LSCSA_QW_OFFSET(srr0);
+	regs_spill[offset].slot[0] = spu_readch(SPU_RdSRR0);
+}
+
+static inline void spill_regs_to_mem(addr64 lscsa_ea)
+{
+	unsigned int ls = (unsigned int)&regs_spill[0];
+	unsigned int size = sizeof(regs_spill);
+	unsigned int tag_id = 0;
+	unsigned int cmd = 0x20; /* PUT */
+
+	/* Save, Step 13:
+	 * Enqueue a PUT command (tag 0) to send the LSCSA
+	 * to the CSA.
+	 */
+	spu_writech(MFC_LSA, ls);
+	spu_writech(MFC_EAH, lscsa_ea.ui[0]);
+	spu_writech(MFC_EAL, lscsa_ea.ui[1]);
+	spu_writech(MFC_Size, size);
+	spu_writech(MFC_TagID, tag_id);
+	spu_writech(MFC_Cmd, cmd);
+}
+
+static inline void enqueue_sync(addr64 lscsa_ea)
+{
+	unsigned int tag_id = 0;
+	unsigned int cmd = 0xCC;
+
+	/* Save, Step 14:
+	 * Enqueue an MFC_SYNC command (tag 0).
+	 */
+	spu_writech(MFC_TagID, tag_id);
+	spu_writech(MFC_Cmd, cmd);
+}
+
+static inline void save_complete(void)
+{
+	/* Save, Step 18:
+	 * Issue a stop-and-signal instruction indicating
+	 * "save complete". Note: This function will not
+	 * return!!
+	 */
+	spu_stop(SPU_SAVE_COMPLETE);
+}
+
+/**
+ * main - entry point for SPU-side context save.
+ *
+ * This code deviates from the documented sequence as follows:
+ *
+ * 1. The EA for LSCSA is passed from PPE in the
+ *    signal notification channels.
+ * 2. All 128 registers are saved by crt0.o.
+ */ +int main() +{ + addr64 lscsa_ea; + + lscsa_ea.ui[0] = spu_readch(SPU_RdSigNotify1); + lscsa_ea.ui[1] = spu_readch(SPU_RdSigNotify2); + + /* Step 1: done by exit(). */ + save_event_mask(); /* Step 2. */ + save_tag_mask(); /* Step 3. */ + set_event_mask(); /* Step 4. */ + set_tag_mask(); /* Step 5. */ + build_dma_list(lscsa_ea); /* Step 6. */ + save_upper_240kb(lscsa_ea); /* Step 7. */ + /* Step 8: done by exit(). */ + save_fpcr(); /* Step 9. */ + save_decr(); /* Step 10. */ + save_srr0(); /* Step 11. */ + enqueue_putllc(lscsa_ea); /* Step 12. */ + spill_regs_to_mem(lscsa_ea); /* Step 13. */ + enqueue_sync(lscsa_ea); /* Step 14. */ + set_tag_update(); /* Step 15. */ + read_tag_status(); /* Step 16. */ + read_llar_status(); /* Step 17. */ + save_complete(); /* Step 18. */ + + return 0; +} diff --git a/arch/powerpc/platforms/cell/spufs/spu_save_crt0.S b/arch/powerpc/platforms/cell/spufs/spu_save_crt0.S new file mode 100644 index 0000000000..5ce32efdca --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/spu_save_crt0.S @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * crt0_s.S: Entry function for SPU-side context save. + * + * Copyright (C) 2005 IBM + * + * Entry function for SPU-side of the context save sequence. + * Saves all 128 GPRs, sets up an initial stack frame, then + * branches to 'main'. + */ + +#include + +.data +.align 7 +.globl regs_spill +regs_spill: +.space SIZEOF_SPU_SPILL_REGS, 0x0 + +.text +.global _start +_start: + /* SPU Context Save Step 1: Save the first 16 GPRs. */ + stqa $0, regs_spill + 0 + stqa $1, regs_spill + 16 + stqa $2, regs_spill + 32 + stqa $3, regs_spill + 48 + stqa $4, regs_spill + 64 + stqa $5, regs_spill + 80 + stqa $6, regs_spill + 96 + stqa $7, regs_spill + 112 + stqa $8, regs_spill + 128 + stqa $9, regs_spill + 144 + stqa $10, regs_spill + 160 + stqa $11, regs_spill + 176 + stqa $12, regs_spill + 192 + stqa $13, regs_spill + 208 + stqa $14, regs_spill + 224 + stqa $15, regs_spill + 240 + + /* SPU Context Save, Step 8: Save the remaining 112 GPRs. */ + ila $3, regs_spill + 256 +save_regs: + lqr $4, save_reg_insts +save_reg_loop: + ai $4, $4, 4 + .balignl 16, 0x40200000 +save_reg_insts: /* must be quad-word aligned. */ + stqd $16, 0($3) + stqd $17, 16($3) + stqd $18, 32($3) + stqd $19, 48($3) + andi $5, $4, 0x7F + stqr $4, save_reg_insts + ai $3, $3, 64 + brnz $5, save_reg_loop + + /* Initialize the stack pointer to point to 16368 + * (16kb-16). The back chain pointer is initialized + * to NULL. + */ + il $0, 0 + il $SP, 16368 + stqd $0, 0($SP) + + /* Allocate a minimum stack frame for the called main. + * This is needed so that main has a place to save the + * link register when it calls another function. + */ + stqd $SP, -160($SP) + ai $SP, $SP, -160 + + /* Call the program's main function. */ + brsl $0, main + + /* In this case main should not return; if it does + * there has been an error in the sequence. Execute + * stop-and-signal with code=0. + */ +.global exit +.global _exit +exit: +_exit: + stop 0x0 + + /* Pad the size of this crt0.o to be multiple of 16 bytes. */ +.balignl 16, 0x0 + diff --git a/arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped b/arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped new file mode 100644 index 0000000000..b9f81ac8a6 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped @@ -0,0 +1,743 @@ +/* + * spu_save_dump.h: Copyright (C) 2005 IBM. + * Hex-dump auto generated from spu_save.c. + * Do not edit! 
+ */ +static unsigned int spu_save_code[] __attribute__((__aligned__(128))) = { +0x20805000, +0x20805201, +0x20805402, +0x20805603, +0x20805804, +0x20805a05, +0x20805c06, +0x20805e07, +0x20806008, +0x20806209, +0x2080640a, +0x2080660b, +0x2080680c, +0x20806a0d, +0x20806c0e, +0x20806e0f, +0x4201c003, +0x33800184, +0x1c010204, +0x40200000, +0x24000190, +0x24004191, +0x24008192, +0x2400c193, +0x141fc205, +0x23fffd84, +0x1c100183, +0x217ffb85, +0x40800000, +0x409ff801, +0x24000080, +0x24fd8081, +0x1cd80081, +0x33000180, +0x00000000, +0x00000000, +0x01a00182, +0x3ec00083, +0xb1c38103, +0x01a00204, +0x3ec10082, +0x4201400d, +0xb1c38202, +0x01a00583, +0x34218682, +0x3ed80684, +0xb0408184, +0x24218682, +0x01a00603, +0x00200000, +0x34214682, +0x3ed40684, +0xb0408184, +0x40800003, +0x24214682, +0x21a00083, +0x40800082, +0x21a00b02, +0x4020007f, +0x1000251e, +0x42a00002, +0x32800008, +0x4205c00c, +0x00200000, +0x40a0000b, +0x3f82070f, +0x4080020a, +0x40800709, +0x3fe3078f, +0x3fbf0783, +0x3f200183, +0x3fbe0183, +0x3fe30187, +0x18008387, +0x4205c002, +0x3ac30404, +0x1cffc489, +0x00200000, +0x18008403, +0x38830402, +0x4cffc486, +0x3ac28185, +0xb0408584, +0x28830402, +0x1c020408, +0x38828182, +0xb0408385, +0x1802c387, +0x28828182, +0x217ff886, +0x04000582, +0x32800007, +0x21a00802, +0x3fbf0705, +0x3f200285, +0x3fbe0285, +0x3fe30285, +0x21a00885, +0x04000603, +0x21a00903, +0x40803c02, +0x21a00982, +0x04000386, +0x21a00a06, +0x40801202, +0x21a00a82, +0x73000003, +0x24200683, +0x01a00404, +0x00200000, +0x34204682, +0x3ec40683, +0xb0408203, +0x24204682, +0x01a00783, +0x00200000, +0x3421c682, +0x3edc0684, +0xb0408184, +0x2421c682, +0x21a00806, +0x21a00885, +0x3fbf0784, +0x3f200204, +0x3fbe0204, +0x3fe30204, +0x21a00904, +0x40804002, +0x21a00982, +0x21a00a06, +0x40805a02, +0x21a00a82, +0x04000683, +0x21a00803, +0x21a00885, +0x21a00904, +0x40848002, +0x21a00982, +0x21a00a06, +0x40801002, +0x21a00a82, +0x21a00a06, +0x40806602, +0x00200000, +0x35800009, +0x21a00a82, +0x40800083, +0x21a00b83, +0x01a00c02, +0x01a00d83, +0x00003ffb, +0x40800003, +0x4020007f, +0x35000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, 
+0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, 
+0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +0x00000000, +}; diff --git a/arch/powerpc/platforms/cell/spufs/spu_utils.h b/arch/powerpc/platforms/cell/spufs/spu_utils.h new file mode 100644 index 0000000000..4fc1ebb45e --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/spu_utils.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * utils.h: Utilities for SPU-side of the context switch operation. + * + * (C) Copyright IBM 2005 + */ + +#ifndef _SPU_CONTEXT_UTILS_H_ +#define _SPU_CONTEXT_UTILS_H_ + +/* + * 64-bit safe EA. + */ +typedef union { + unsigned long long ull; + unsigned int ui[2]; +} addr64; + +/* + * 128-bit register template. + */ +typedef union { + unsigned int slot[4]; + vector unsigned int v; +} spu_reg128v; + +/* + * DMA list structure. + */ +struct dma_list_elem { + unsigned int size; + unsigned int ea_low; +}; + +/* + * Declare storage for 8-byte aligned DMA list. + */ +struct dma_list_elem dma_list[15] __attribute__ ((aligned(8))); + +/* + * External definition for storage + * declared in crt0. 
+ */ +extern spu_reg128v regs_spill[NR_SPU_SPILL_REGS]; + +/* + * Compute LSCSA byte offset for a given field. + */ +static struct spu_lscsa *dummy = (struct spu_lscsa *)0; +#define LSCSA_BYTE_OFFSET(_field) \ + ((char *)(&(dummy->_field)) - (char *)(&(dummy->gprs[0].slot[0]))) +#define LSCSA_QW_OFFSET(_field) (LSCSA_BYTE_OFFSET(_field) >> 4) + +static inline void set_event_mask(void) +{ + unsigned int event_mask = 0; + + /* Save, Step 4: + * Restore, Step 1: + * Set the SPU_RdEventMsk channel to zero to mask + * all events. + */ + spu_writech(SPU_WrEventMask, event_mask); +} + +static inline void set_tag_mask(void) +{ + unsigned int tag_mask = 1; + + /* Save, Step 5: + * Restore, Step 2: + * Set the SPU_WrTagMsk channel to '01' to unmask + * only tag group 0. + */ + spu_writech(MFC_WrTagMask, tag_mask); +} + +static inline void build_dma_list(addr64 lscsa_ea) +{ + unsigned int ea_low; + int i; + + /* Save, Step 6: + * Restore, Step 3: + * Update the effective address for the CSA in the + * pre-canned DMA-list in local storage. + */ + ea_low = lscsa_ea.ui[1]; + ea_low += LSCSA_BYTE_OFFSET(ls[16384]); + + for (i = 0; i < 15; i++, ea_low += 16384) { + dma_list[i].size = 16384; + dma_list[i].ea_low = ea_low; + } +} + +static inline void enqueue_putllc(addr64 lscsa_ea) +{ + unsigned int ls = 0; + unsigned int size = 128; + unsigned int tag_id = 0; + unsigned int cmd = 0xB4; /* PUTLLC */ + + /* Save, Step 12: + * Restore, Step 7: + * Send a PUTLLC (tag 0) command to the MFC using + * an effective address in the CSA in order to + * remove any possible lock-line reservation. + */ + spu_writech(MFC_LSA, ls); + spu_writech(MFC_EAH, lscsa_ea.ui[0]); + spu_writech(MFC_EAL, lscsa_ea.ui[1]); + spu_writech(MFC_Size, size); + spu_writech(MFC_TagID, tag_id); + spu_writech(MFC_Cmd, cmd); +} + +static inline void set_tag_update(void) +{ + unsigned int update_any = 1; + + /* Save, Step 15: + * Restore, Step 8: + * Write the MFC_TagUpdate channel with '01'. + */ + spu_writech(MFC_WrTagUpdate, update_any); +} + +static inline void read_tag_status(void) +{ + /* Save, Step 16: + * Restore, Step 9: + * Read the MFC_TagStat channel data. + */ + spu_readch(MFC_RdTagStat); +} + +static inline void read_llar_status(void) +{ + /* Save, Step 17: + * Restore, Step 10: + * Read the MFC_AtomicStat channel data. 
+ */ + spu_readch(MFC_RdAtomicStat); +} + +#endif /* _SPU_CONTEXT_UTILS_H_ */ diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h new file mode 100644 index 0000000000..84958487f6 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -0,0 +1,356 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * SPU file system + * + * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 + * + * Author: Arnd Bergmann + */ +#ifndef SPUFS_H +#define SPUFS_H + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define SPUFS_PS_MAP_SIZE 0x20000 +#define SPUFS_MFC_MAP_SIZE 0x1000 +#define SPUFS_CNTL_MAP_SIZE 0x1000 +#define SPUFS_SIGNAL_MAP_SIZE PAGE_SIZE +#define SPUFS_MSS_MAP_SIZE 0x1000 + +/* The magic number for our file system */ +enum { + SPUFS_MAGIC = 0x23c9b64e, +}; + +struct spu_context_ops; +struct spu_gang; + +/* ctx->sched_flags */ +enum { + SPU_SCHED_NOTIFY_ACTIVE, + SPU_SCHED_WAS_ACTIVE, /* was active upon spu_acquire_saved() */ + SPU_SCHED_SPU_RUN, /* context is within spu_run */ +}; + +enum { + SWITCH_LOG_BUFSIZE = 4096, +}; + +enum { + SWITCH_LOG_START, + SWITCH_LOG_STOP, + SWITCH_LOG_EXIT, +}; + +struct switch_log { + wait_queue_head_t wait; + unsigned long head; + unsigned long tail; + struct switch_log_entry { + struct timespec64 tstamp; + s32 spu_id; + u32 type; + u32 val; + u64 timebase; + } log[]; +}; + +struct spu_context { + struct spu *spu; /* pointer to a physical SPU */ + struct spu_state csa; /* SPU context save area. */ + spinlock_t mmio_lock; /* protects mmio access */ + struct address_space *local_store; /* local store mapping. */ + struct address_space *mfc; /* 'mfc' area mappings. */ + struct address_space *cntl; /* 'control' area mappings. */ + struct address_space *signal1; /* 'signal1' area mappings. */ + struct address_space *signal2; /* 'signal2' area mappings. */ + struct address_space *mss; /* 'mss' area mappings. */ + struct address_space *psmap; /* 'psmap' area mappings. 
*/ + struct mutex mapping_lock; + u64 object_id; /* user space pointer for GNU Debugger */ + + enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state; + struct mutex state_mutex; + struct mutex run_mutex; + + struct mm_struct *owner; + + struct kref kref; + wait_queue_head_t ibox_wq; + wait_queue_head_t wbox_wq; + wait_queue_head_t stop_wq; + wait_queue_head_t mfc_wq; + wait_queue_head_t run_wq; + u32 tagwait; + struct spu_context_ops *ops; + struct work_struct reap_work; + unsigned long flags; + unsigned long event_return; + + struct list_head gang_list; + struct spu_gang *gang; + struct kref *prof_priv_kref; + void ( * prof_priv_release) (struct kref *kref); + + /* owner thread */ + pid_t tid; + + /* scheduler fields */ + struct list_head rq; + unsigned int time_slice; + unsigned long sched_flags; + cpumask_t cpus_allowed; + int policy; + int prio; + int last_ran; + + /* statistics */ + struct { + /* updates protected by ctx->state_mutex */ + enum spu_utilization_state util_state; + unsigned long long tstamp; /* time of last state switch */ + unsigned long long times[SPU_UTIL_MAX]; + unsigned long long vol_ctx_switch; + unsigned long long invol_ctx_switch; + unsigned long long min_flt; + unsigned long long maj_flt; + unsigned long long hash_flt; + unsigned long long slb_flt; + unsigned long long slb_flt_base; /* # at last ctx switch */ + unsigned long long class2_intr; + unsigned long long class2_intr_base; /* # at last ctx switch */ + unsigned long long libassist; + } stats; + + /* context switch log */ + struct switch_log *switch_log; + + struct list_head aff_list; + int aff_head; + int aff_offset; +}; + +struct spu_gang { + struct list_head list; + struct mutex mutex; + struct kref kref; + int contexts; + + struct spu_context *aff_ref_ctx; + struct list_head aff_list_head; + struct mutex aff_mutex; + int aff_flags; + struct spu *aff_ref_spu; + atomic_t aff_sched_count; +}; + +/* Flag bits for spu_gang aff_flags */ +#define AFF_OFFSETS_SET 1 +#define AFF_MERGED 2 + +struct mfc_dma_command { + int32_t pad; /* reserved */ + uint32_t lsa; /* local storage address */ + uint64_t ea; /* effective address */ + uint16_t size; /* transfer size */ + uint16_t tag; /* command tag */ + uint16_t class; /* class ID */ + uint16_t cmd; /* command opcode */ +}; + + +/* SPU context query/set operations. 
*/ +struct spu_context_ops { + int (*mbox_read) (struct spu_context * ctx, u32 * data); + u32(*mbox_stat_read) (struct spu_context * ctx); + __poll_t (*mbox_stat_poll)(struct spu_context *ctx, __poll_t events); + int (*ibox_read) (struct spu_context * ctx, u32 * data); + int (*wbox_write) (struct spu_context * ctx, u32 data); + u32(*signal1_read) (struct spu_context * ctx); + void (*signal1_write) (struct spu_context * ctx, u32 data); + u32(*signal2_read) (struct spu_context * ctx); + void (*signal2_write) (struct spu_context * ctx, u32 data); + void (*signal1_type_set) (struct spu_context * ctx, u64 val); + u64(*signal1_type_get) (struct spu_context * ctx); + void (*signal2_type_set) (struct spu_context * ctx, u64 val); + u64(*signal2_type_get) (struct spu_context * ctx); + u32(*npc_read) (struct spu_context * ctx); + void (*npc_write) (struct spu_context * ctx, u32 data); + u32(*status_read) (struct spu_context * ctx); + char*(*get_ls) (struct spu_context * ctx); + void (*privcntl_write) (struct spu_context *ctx, u64 data); + u32 (*runcntl_read) (struct spu_context * ctx); + void (*runcntl_write) (struct spu_context * ctx, u32 data); + void (*runcntl_stop) (struct spu_context * ctx); + void (*master_start) (struct spu_context * ctx); + void (*master_stop) (struct spu_context * ctx); + int (*set_mfc_query)(struct spu_context * ctx, u32 mask, u32 mode); + u32 (*read_mfc_tagstatus)(struct spu_context * ctx); + u32 (*get_mfc_free_elements)(struct spu_context *ctx); + int (*send_mfc_command)(struct spu_context * ctx, + struct mfc_dma_command * cmd); + void (*dma_info_read) (struct spu_context * ctx, + struct spu_dma_info * info); + void (*proxydma_info_read) (struct spu_context * ctx, + struct spu_proxydma_info * info); + void (*restart_dma)(struct spu_context *ctx); +}; + +extern struct spu_context_ops spu_hw_ops; +extern struct spu_context_ops spu_backing_ops; + +struct spufs_inode_info { + struct spu_context *i_ctx; + struct spu_gang *i_gang; + struct inode vfs_inode; + int i_openers; +}; +#define SPUFS_I(inode) \ + container_of(inode, struct spufs_inode_info, vfs_inode) + +struct spufs_tree_descr { + const char *name; + const struct file_operations *ops; + umode_t mode; + size_t size; +}; + +extern const struct spufs_tree_descr spufs_dir_contents[]; +extern const struct spufs_tree_descr spufs_dir_nosched_contents[]; +extern const struct spufs_tree_descr spufs_dir_debug_contents[]; + +/* system call implementation */ +extern struct spufs_calls spufs_calls; +struct coredump_params; +long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status); +long spufs_create(const struct path *nd, struct dentry *dentry, unsigned int flags, + umode_t mode, struct file *filp); +/* ELF coredump callbacks for writing SPU ELF notes */ +extern int spufs_coredump_extra_notes_size(void); +extern int spufs_coredump_extra_notes_write(struct coredump_params *cprm); + +extern const struct file_operations spufs_context_fops; + +/* gang management */ +struct spu_gang *alloc_spu_gang(void); +struct spu_gang *get_spu_gang(struct spu_gang *gang); +int put_spu_gang(struct spu_gang *gang); +void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx); +void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx); + +/* fault handling */ +int spufs_handle_class1(struct spu_context *ctx); +int spufs_handle_class0(struct spu_context *ctx); + +/* affinity */ +struct spu *affinity_check(struct spu_context *ctx); + +/* context management */ +extern atomic_t nr_spu_contexts; +static inline int 
__must_check spu_acquire(struct spu_context *ctx) +{ + return mutex_lock_interruptible(&ctx->state_mutex); +} + +static inline void spu_release(struct spu_context *ctx) +{ + mutex_unlock(&ctx->state_mutex); +} + +struct spu_context * alloc_spu_context(struct spu_gang *gang); +void destroy_spu_context(struct kref *kref); +struct spu_context * get_spu_context(struct spu_context *ctx); +int put_spu_context(struct spu_context *ctx); +void spu_unmap_mappings(struct spu_context *ctx); + +void spu_forget(struct spu_context *ctx); +int __must_check spu_acquire_saved(struct spu_context *ctx); +void spu_release_saved(struct spu_context *ctx); + +int spu_stopped(struct spu_context *ctx, u32 * stat); +void spu_del_from_rq(struct spu_context *ctx); +int spu_activate(struct spu_context *ctx, unsigned long flags); +void spu_deactivate(struct spu_context *ctx); +void spu_yield(struct spu_context *ctx); +void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx, + u32 type, u32 val); +void spu_set_timeslice(struct spu_context *ctx); +void spu_update_sched_info(struct spu_context *ctx); +void __spu_update_sched_info(struct spu_context *ctx); +int __init spu_sched_init(void); +void spu_sched_exit(void); + +extern char *isolated_loader; + +/* + * spufs_wait + * Same as wait_event_interruptible(), except that here + * we need to call spu_release(ctx) before sleeping, and + * then spu_acquire(ctx) when awoken. + * + * Returns with state_mutex re-acquired when successful or + * with -ERESTARTSYS and the state_mutex dropped when interrupted. + */ + +#define spufs_wait(wq, condition) \ +({ \ + int __ret = 0; \ + DEFINE_WAIT(__wait); \ + for (;;) { \ + prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE); \ + if (condition) \ + break; \ + spu_release(ctx); \ + if (signal_pending(current)) { \ + __ret = -ERESTARTSYS; \ + break; \ + } \ + schedule(); \ + __ret = spu_acquire(ctx); \ + if (__ret) \ + break; \ + } \ + finish_wait(&(wq), &__wait); \ + __ret; \ +}) + +size_t spu_wbox_write(struct spu_context *ctx, u32 data); +size_t spu_ibox_read(struct spu_context *ctx, u32 *data); + +/* irq callback funcs. 
*/ +void spufs_ibox_callback(struct spu *spu); +void spufs_wbox_callback(struct spu *spu); +void spufs_stop_callback(struct spu *spu, int irq); +void spufs_mfc_callback(struct spu *spu); +void spufs_dma_callback(struct spu *spu, int type); + +struct spufs_coredump_reader { + char *name; + ssize_t (*dump)(struct spu_context *ctx, struct coredump_params *cprm); + u64 (*get)(struct spu_context *ctx); + size_t size; +}; +extern const struct spufs_coredump_reader spufs_coredump_read[]; + +extern int spu_init_csa(struct spu_state *csa); +extern void spu_fini_csa(struct spu_state *csa); +extern int spu_save(struct spu_state *prev, struct spu *spu); +extern int spu_restore(struct spu_state *new, struct spu *spu); +extern int spu_switch(struct spu_state *prev, struct spu_state *new, + struct spu *spu); +extern int spu_alloc_lscsa(struct spu_state *csa); +extern void spu_free_lscsa(struct spu_state *csa); + +extern void spuctx_switch_state(struct spu_context *ctx, + enum spu_utilization_state new_state); + +#endif diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.h b/arch/powerpc/platforms/cell/spufs/sputrace.h new file mode 100644 index 0000000000..1def11e911 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/sputrace.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#if !defined(_TRACE_SPUFS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SPUFS_H + +#include +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM spufs + +TRACE_EVENT(spufs_context, + TP_PROTO(struct spu_context *ctx, struct spu *spu, const char *name), + TP_ARGS(ctx, spu, name), + + TP_STRUCT__entry( + __field(const char *, name) + __field(int, owner_tid) + __field(int, number) + ), + + TP_fast_assign( + __entry->name = name; + __entry->owner_tid = ctx->tid; + __entry->number = spu ? spu->number : -1; + ), + + TP_printk("%s (ctxthread = %d, spu = %d)", + __entry->name, __entry->owner_tid, __entry->number) +); + +#define spu_context_trace(name, ctx, spu) \ + trace_spufs_context(ctx, spu, __stringify(name)) +#define spu_context_nospu_trace(name, ctx) \ + trace_spufs_context(ctx, NULL, __stringify(name)) + +#endif /* _TRACE_SPUFS_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE sputrace +#include diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c new file mode 100644 index 0000000000..b41e81b22f --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/switch.c @@ -0,0 +1,2206 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * spu_switch.c + * + * (C) Copyright IBM Corp. 2005 + * + * Author: Mark Nutter + * + * Host-side part of SPU context switch sequence outlined in + * Synergistic Processor Element, Book IV. + * + * A fully premptive switch of an SPE is very expensive in terms + * of time and system resources. SPE Book IV indicates that SPE + * allocation should follow a "serially reusable device" model, + * in which the SPE is assigned a task until it completes. When + * this is not possible, this sequence may be used to premptively + * save, and then later (optionally) restore the context of a + * program executing on an SPE. 
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include "spufs.h"
+
+#include "spu_save_dump.h"
+#include "spu_restore_dump.h"
+
+#if 0
+#define POLL_WHILE_TRUE(_c) { \
+	do { \
+	} while (_c); \
+	}
+#else
+#define RELAX_SPIN_COUNT 1000
+#define POLL_WHILE_TRUE(_c) { \
+	do { \
+		int _i; \
+		for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \
+			cpu_relax(); \
+		} \
+		if (unlikely(_c)) yield(); \
+		else break; \
+	} while (_c); \
+	}
+#endif /* debug */
+
+#define POLL_WHILE_FALSE(_c) POLL_WHILE_TRUE(!(_c))
+
+static inline void acquire_spu_lock(struct spu *spu)
+{
+	/* Save, Step 1:
+	 * Restore, Step 1:
+	 *     Acquire SPU's access lock.
+	 */
+}
+
+static inline void release_spu_lock(struct spu *spu)
+{
+	/* Restore, Step 76:
+	 *     Release SPU's access lock.
+	 */
+}
+
+static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
+{
+	struct spu_problem __iomem *prob = spu->problem;
+	u32 isolate_state;
+
+	/* Save, Step 2:
+	 * Save, Step 6:
+	 * If SPU_Status[E,L,IS] any field is '1', this
+	 * SPU is in isolate state and cannot be context
+	 * saved at this time.
+	 */
+	isolate_state = SPU_STATUS_ISOLATED_STATE |
+	    SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;
+	return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
+}
+
+static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
+{
+	/* Save, Step 3:
+	 * Restore, Step 2:
+	 * Save INT_Mask_class0 in CSA.
+	 * Write INT_MASK_class0 with value of 0.
+	 * Save INT_Mask_class1 in CSA.
+	 * Write INT_MASK_class1 with value of 0.
+	 * Save INT_Mask_class2 in CSA.
+	 * Write INT_MASK_class2 with value of 0.
+	 * Synchronize all three interrupts to be sure
+	 * we no longer execute a handler on another CPU.
+	 */
+	spin_lock_irq(&spu->register_lock);
+	if (csa) {
+		csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
+		csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
+		csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
+	}
+	spu_int_mask_set(spu, 0, 0ul);
+	spu_int_mask_set(spu, 1, 0ul);
+	spu_int_mask_set(spu, 2, 0ul);
+	eieio();
+	spin_unlock_irq(&spu->register_lock);
+
+	/*
+	 * This flag needs to be set before calling synchronize_irq so
+	 * that the update will be visible to the relevant handlers
+	 * via a simple load.
+	 */
+	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
+	clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
+	synchronize_irq(spu->irqs[0]);
+	synchronize_irq(spu->irqs[1]);
+	synchronize_irq(spu->irqs[2]);
+}
+
+static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
+{
+	/* Save, Step 4:
+	 * Restore, Step 25.
+	 * Set a software watchdog timer, which specifies the
+	 * maximum allowable time for a context save sequence.
+	 *
+	 * For present, this implementation will not set a global
+	 * watchdog timer, as virtualization & variable system load
+	 * may cause unpredictable execution times.
+	 */
+}
+
+static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
+{
+	/* Save, Step 5:
+	 * Restore, Step 3:
+	 * Inhibit user-space access (if provided) to this
+	 * SPU by unmapping the virtual pages assigned to
+	 * the SPU memory-mapped I/O (MMIO) for problem
+	 * state. TBD.
+	 */
+}
+
+static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
+{
+	/* Save, Step 7:
+	 * Restore, Step 5:
+	 * Set a software context switch pending flag.
+	 * Done above in Step 3 - disable_interrupts().
+	 */
+}
+
+static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
+{
+	struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+	/* Save, Step 8:
+	 * Suspend DMA and save MFC_CNTL.
+ */ + switch (in_be64(&priv2->mfc_control_RW) & + MFC_CNTL_SUSPEND_DMA_STATUS_MASK) { + case MFC_CNTL_SUSPEND_IN_PROGRESS: + POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & + MFC_CNTL_SUSPEND_DMA_STATUS_MASK) == + MFC_CNTL_SUSPEND_COMPLETE); + fallthrough; + case MFC_CNTL_SUSPEND_COMPLETE: + if (csa) + csa->priv2.mfc_control_RW = + in_be64(&priv2->mfc_control_RW) | + MFC_CNTL_SUSPEND_DMA_QUEUE; + break; + case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION: + out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE); + POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & + MFC_CNTL_SUSPEND_DMA_STATUS_MASK) == + MFC_CNTL_SUSPEND_COMPLETE); + if (csa) + csa->priv2.mfc_control_RW = + in_be64(&priv2->mfc_control_RW) & + ~MFC_CNTL_SUSPEND_DMA_QUEUE & + ~MFC_CNTL_SUSPEND_MASK; + break; + } +} + +static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + + /* Save, Step 9: + * Save SPU_Runcntl in the CSA. This value contains + * the "Application Desired State". + */ + csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW); +} + +static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu) +{ + /* Save, Step 10: + * Save MFC_SR1 in the CSA. + */ + csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu); +} + +static inline void save_spu_status(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + + /* Save, Step 11: + * Read SPU_Status[R], and save to CSA. + */ + if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) { + csa->prob.spu_status_R = in_be32(&prob->spu_status_R); + } else { + u32 stopped; + + out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); + eieio(); + POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & + SPU_STATUS_RUNNING); + stopped = + SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP | + SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP; + if ((in_be32(&prob->spu_status_R) & stopped) == 0) + csa->prob.spu_status_R = SPU_STATUS_RUNNING; + else + csa->prob.spu_status_R = in_be32(&prob->spu_status_R); + } +} + +static inline void save_mfc_stopped_status(struct spu_state *csa, + struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + const u64 mask = MFC_CNTL_DECREMENTER_RUNNING | + MFC_CNTL_DMA_QUEUES_EMPTY; + + /* Save, Step 12: + * Read MFC_CNTL[Ds]. Update saved copy of + * CSA.MFC_CNTL[Ds]. + * + * update: do the same with MFC_CNTL[Q]. + */ + csa->priv2.mfc_control_RW &= ~mask; + csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask; +} + +static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Save, Step 13: + * Write MFC_CNTL[Dh] set to a '1' to halt + * the decrementer. + */ + out_be64(&priv2->mfc_control_RW, + MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK); + eieio(); +} + +static inline void save_timebase(struct spu_state *csa, struct spu *spu) +{ + /* Save, Step 14: + * Read PPE Timebase High and Timebase low registers + * and save in CSA. TBD. + */ + csa->suspend_time = get_cycles(); +} + +static inline void remove_other_spu_access(struct spu_state *csa, + struct spu *spu) +{ + /* Save, Step 15: + * Remove other SPU access to this SPU by unmapping + * this SPU's pages from their address space. TBD. + */ +} + +static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + + /* Save, Step 16: + * Restore, Step 11. + * Write SPU_MSSync register. 
Poll SPU_MSSync[P] + * for a value of 0. + */ + out_be64(&prob->spc_mssync_RW, 1UL); + POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING); +} + +static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu) +{ + /* Save, Step 17: + * Restore, Step 12. + * Restore, Step 48. + * Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register. + * Then issue a PPE sync instruction. + */ + spu_tlb_invalidate(spu); + mb(); +} + +static inline void handle_pending_interrupts(struct spu_state *csa, + struct spu *spu) +{ + /* Save, Step 18: + * Handle any pending interrupts from this SPU + * here. This is OS or hypervisor specific. One + * option is to re-enable interrupts to handle any + * pending interrupts, with the interrupt handlers + * recognizing the software Context Switch Pending + * flag, to ensure the SPU execution or MFC command + * queue is not restarted. TBD. + */ +} + +static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + int i; + + /* Save, Step 19: + * If MFC_Cntl[Se]=0 then save + * MFC command queues. + */ + if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) { + for (i = 0; i < 8; i++) { + csa->priv2.puq[i].mfc_cq_data0_RW = + in_be64(&priv2->puq[i].mfc_cq_data0_RW); + csa->priv2.puq[i].mfc_cq_data1_RW = + in_be64(&priv2->puq[i].mfc_cq_data1_RW); + csa->priv2.puq[i].mfc_cq_data2_RW = + in_be64(&priv2->puq[i].mfc_cq_data2_RW); + csa->priv2.puq[i].mfc_cq_data3_RW = + in_be64(&priv2->puq[i].mfc_cq_data3_RW); + } + for (i = 0; i < 16; i++) { + csa->priv2.spuq[i].mfc_cq_data0_RW = + in_be64(&priv2->spuq[i].mfc_cq_data0_RW); + csa->priv2.spuq[i].mfc_cq_data1_RW = + in_be64(&priv2->spuq[i].mfc_cq_data1_RW); + csa->priv2.spuq[i].mfc_cq_data2_RW = + in_be64(&priv2->spuq[i].mfc_cq_data2_RW); + csa->priv2.spuq[i].mfc_cq_data3_RW = + in_be64(&priv2->spuq[i].mfc_cq_data3_RW); + } + } +} + +static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + + /* Save, Step 20: + * Save the PPU_QueryMask register + * in the CSA. + */ + csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW); +} + +static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + + /* Save, Step 21: + * Save the PPU_QueryType register + * in the CSA. + */ + csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW); +} + +static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + + /* Save the Prxy_TagStatus register in the CSA. + * + * It is unnecessary to restore dma_tagstatus_R, however, + * dma_tagstatus_R in the CSA is accessed via backing_ops, so + * we must save it. + */ + csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R); +} + +static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Save, Step 22: + * Save the MFC_CSR_TSQ register + * in the LSCSA. + */ + csa->priv2.spu_tag_status_query_RW = + in_be64(&priv2->spu_tag_status_query_RW); +} + +static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Save, Step 23: + * Save the MFC_CSR_CMD1 and MFC_CSR_CMD2 + * registers in the CSA. 
+ */ + csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW); + csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW); +} + +static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Save, Step 24: + * Save the MFC_CSR_ATO register in + * the CSA. + */ + csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW); +} + +static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu) +{ + /* Save, Step 25: + * Save the MFC_TCLASS_ID register in + * the CSA. + */ + csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu); +} + +static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu) +{ + /* Save, Step 26: + * Restore, Step 23. + * Write the MFC_TCLASS_ID register with + * the value 0x10000000. + */ + spu_mfc_tclass_id_set(spu, 0x10000000); + eieio(); +} + +static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Save, Step 27: + * Restore, Step 14. + * Write MFC_CNTL[Pc]=1 (purge queue). + */ + out_be64(&priv2->mfc_control_RW, + MFC_CNTL_PURGE_DMA_REQUEST | + MFC_CNTL_SUSPEND_MASK); + eieio(); +} + +static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Save, Step 28: + * Poll MFC_CNTL[Ps] until value '11' is read + * (purge complete). + */ + POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & + MFC_CNTL_PURGE_DMA_STATUS_MASK) == + MFC_CNTL_PURGE_DMA_COMPLETE); +} + +static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu) +{ + /* Save, Step 30: + * Restore, Step 18: + * Write MFC_SR1 with MFC_SR1[D=0,S=1] and + * MFC_SR1[TL,R,Pr,T] set correctly for the + * OS specific environment. + * + * Implementation note: The SPU-side code + * for save/restore is privileged, so the + * MFC_SR1[Pr] bit is not set. + * + */ + spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK | + MFC_STATE1_RELOCATE_MASK | + MFC_STATE1_BUS_TLBIE_MASK)); +} + +static inline void save_spu_npc(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + + /* Save, Step 31: + * Save SPU_NPC in the CSA. + */ + csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW); +} + +static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Save, Step 32: + * Save SPU_PrivCntl in the CSA. + */ + csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW); +} + +static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Save, Step 33: + * Restore, Step 16: + * Write SPU_PrivCntl[S,Le,A] fields reset to 0. + */ + out_be64(&priv2->spu_privcntl_RW, 0UL); + eieio(); +} + +static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Save, Step 34: + * Save SPU_LSLR in the CSA. + */ + csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW); +} + +static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Save, Step 35: + * Restore, Step 17. + * Reset SPU_LSLR. + */ + out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK); + eieio(); +} + +static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Save, Step 36: + * Save SPU_Cfg in the CSA. 
+ */ + csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW); +} + +static inline void save_pm_trace(struct spu_state *csa, struct spu *spu) +{ + /* Save, Step 37: + * Save PM_Trace_Tag_Wait_Mask in the CSA. + * Not performed by this implementation. + */ +} + +static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu) +{ + /* Save, Step 38: + * Save RA_GROUP_ID register and the + * RA_ENABLE reigster in the CSA. + */ + csa->priv1.resource_allocation_groupID_RW = + spu_resource_allocation_groupID_get(spu); + csa->priv1.resource_allocation_enable_RW = + spu_resource_allocation_enable_get(spu); +} + +static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + + /* Save, Step 39: + * Save MB_Stat register in the CSA. + */ + csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R); +} + +static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + + /* Save, Step 40: + * Save the PPU_MB register in the CSA. + */ + csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R); +} + +static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Save, Step 41: + * Save the PPUINT_MB register in the CSA. + */ + csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R); +} + +static inline void save_ch_part1(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL }; + int i; + + /* Save, Step 42: + */ + + /* Save CH 1, without channel count */ + out_be64(&priv2->spu_chnlcntptr_RW, 1); + csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW); + + /* Save the following CH: [0,3,4,24,25,27] */ + for (i = 0; i < ARRAY_SIZE(ch_indices); i++) { + idx = ch_indices[i]; + out_be64(&priv2->spu_chnlcntptr_RW, idx); + eieio(); + csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW); + csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW); + out_be64(&priv2->spu_chnldata_RW, 0UL); + out_be64(&priv2->spu_chnlcnt_RW, 0UL); + eieio(); + } +} + +static inline void save_spu_mb(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + int i; + + /* Save, Step 43: + * Save SPU Read Mailbox Channel. + */ + out_be64(&priv2->spu_chnlcntptr_RW, 29UL); + eieio(); + csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW); + for (i = 0; i < 4; i++) { + csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW); + } + out_be64(&priv2->spu_chnlcnt_RW, 0UL); + eieio(); +} + +static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Save, Step 44: + * Save MFC_CMD Channel. 
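+	 * (Only the channel count of CH 21 is read here; the
+	 * queued commands themselves were captured by
+	 * save_mfc_queues, and this count is written back by
+	 * restore_ch_part2.)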
+ */ + out_be64(&priv2->spu_chnlcntptr_RW, 21UL); + eieio(); + csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW); + eieio(); +} + +static inline void reset_ch(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL }; + u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL }; + u64 idx; + int i; + + /* Save, Step 45: + * Reset the following CH: [21, 23, 28, 30] + */ + for (i = 0; i < 4; i++) { + idx = ch_indices[i]; + out_be64(&priv2->spu_chnlcntptr_RW, idx); + eieio(); + out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]); + eieio(); + } +} + +static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Save, Step 46: + * Restore, Step 25. + * Write MFC_CNTL[Sc]=0 (resume queue processing). + */ + out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE); +} + +static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu, + unsigned int *code, int code_size) +{ + /* Save, Step 47: + * Restore, Step 30. + * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All + * register, then initialize SLB_VSID and SLB_ESID + * to provide access to SPU context save code and + * LSCSA. + * + * This implementation places both the context + * switch code and LSCSA in kernel address space. + * + * Further this implementation assumes that the + * MFC_SR1[R]=1 (in other words, assume that + * translation is desired by OS environment). + */ + spu_invalidate_slbs(spu); + spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size); +} + +static inline void set_switch_active(struct spu_state *csa, struct spu *spu) +{ + /* Save, Step 48: + * Restore, Step 23. + * Change the software context switch pending flag + * to context switch active. This implementation does + * not uses a switch active flag. + * + * Now that we have saved the mfc in the csa, we can add in the + * restart command if an exception occurred. + */ + if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags)) + csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND; + clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags); + mb(); +} + +static inline void enable_interrupts(struct spu_state *csa, struct spu *spu) +{ + unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR | + CLASS1_ENABLE_STORAGE_FAULT_INTR; + + /* Save, Step 49: + * Restore, Step 22: + * Reset and then enable interrupts, as + * needed by OS. + * + * This implementation enables only class1 + * (translation) interrupts. + */ + spin_lock_irq(&spu->register_lock); + spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK); + spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK); + spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK); + spu_int_mask_set(spu, 0, 0ul); + spu_int_mask_set(spu, 1, class1_mask); + spu_int_mask_set(spu, 2, 0ul); + spin_unlock_irq(&spu->register_lock); +} + +static inline int send_mfc_dma(struct spu *spu, unsigned long ea, + unsigned int ls_offset, unsigned int size, + unsigned int tag, unsigned int rclass, + unsigned int cmd) +{ + struct spu_problem __iomem *prob = spu->problem; + union mfc_tag_size_class_cmd command; + unsigned int transfer_size; + volatile unsigned int status = 0x0; + + while (size > 0) { + transfer_size = + (size > MFC_MAX_DMA_SIZE) ? 
MFC_MAX_DMA_SIZE : size; + command.u.mfc_size = transfer_size; + command.u.mfc_tag = tag; + command.u.mfc_rclassid = rclass; + command.u.mfc_cmd = cmd; + do { + out_be32(&prob->mfc_lsa_W, ls_offset); + out_be64(&prob->mfc_ea_W, ea); + out_be64(&prob->mfc_union_W.all64, command.all64); + status = + in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32); + if (unlikely(status & 0x2)) { + cpu_relax(); + } + } while (status & 0x3); + size -= transfer_size; + ea += transfer_size; + ls_offset += transfer_size; + } + return 0; +} + +static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu) +{ + unsigned long addr = (unsigned long)&csa->lscsa->ls[0]; + unsigned int ls_offset = 0x0; + unsigned int size = 16384; + unsigned int tag = 0; + unsigned int rclass = 0; + unsigned int cmd = MFC_PUT_CMD; + + /* Save, Step 50: + * Issue a DMA command to copy the first 16K bytes + * of local storage to the CSA. + */ + send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); +} + +static inline void set_spu_npc(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + + /* Save, Step 51: + * Restore, Step 31. + * Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry + * point address of context save code in local + * storage. + * + * This implementation uses SPU-side save/restore + * programs with entry points at LSA of 0. + */ + out_be32(&prob->spu_npc_RW, 0); + eieio(); +} + +static inline void set_signot1(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + union { + u64 ull; + u32 ui[2]; + } addr64; + + /* Save, Step 52: + * Restore, Step 32: + * Write SPU_Sig_Notify_1 register with upper 32-bits + * of the CSA.LSCSA effective address. + */ + addr64.ull = (u64) csa->lscsa; + out_be32(&prob->signal_notify1, addr64.ui[0]); + eieio(); +} + +static inline void set_signot2(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + union { + u64 ull; + u32 ui[2]; + } addr64; + + /* Save, Step 53: + * Restore, Step 33: + * Write SPU_Sig_Notify_2 register with lower 32-bits + * of the CSA.LSCSA effective address. + */ + addr64.ull = (u64) csa->lscsa; + out_be32(&prob->signal_notify2, addr64.ui[1]); + eieio(); +} + +static inline void send_save_code(struct spu_state *csa, struct spu *spu) +{ + unsigned long addr = (unsigned long)&spu_save_code[0]; + unsigned int ls_offset = 0x0; + unsigned int size = sizeof(spu_save_code); + unsigned int tag = 0; + unsigned int rclass = 0; + unsigned int cmd = MFC_GETFS_CMD; + + /* Save, Step 54: + * Issue a DMA command to copy context save code + * to local storage and start SPU. + */ + send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); +} + +static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + + /* Save, Step 55: + * Restore, Step 38. + * Write PPU_QueryMask=1 (enable Tag Group 0) + * and issue eieio instruction. + */ + out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0)); + eieio(); +} + +static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + u32 mask = MFC_TAGID_TO_TAGMASK(0); + unsigned long flags; + + /* Save, Step 56: + * Restore, Step 39. + * Restore, Step 39. + * Restore, Step 46. + * Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete) + * or write PPU_QueryType[TS]=01 and wait for Tag Group + * Complete Interrupt. 
Write INT_Stat_Class0 or + * INT_Stat_Class2 with value of 'handled'. + */ + POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask); + + local_irq_save(flags); + spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK); + spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK); + local_irq_restore(flags); +} + +static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + unsigned long flags; + + /* Save, Step 57: + * Restore, Step 40. + * Poll until SPU_Status[R]=0 or wait for SPU Class 0 + * or SPU Class 2 interrupt. Write INT_Stat_class0 + * or INT_Stat_class2 with value of handled. + */ + POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); + + local_irq_save(flags); + spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK); + spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK); + local_irq_restore(flags); +} + +static inline int check_save_status(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + u32 complete; + + /* Save, Step 54: + * If SPU_Status[P]=1 and SPU_Status[SC] = "success", + * context save succeeded, otherwise context save + * failed. + */ + complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) | + SPU_STATUS_STOPPED_BY_STOP); + return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0; +} + +static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu) +{ + /* Restore, Step 4: + * If required, notify the "using application" that + * the SPU task has been terminated. TBD. + */ +} + +static inline void suspend_mfc_and_halt_decr(struct spu_state *csa, + struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Restore, Step 7: + * Write MFC_Cntl[Dh,Sc,Sm]='1','1','0' to suspend + * the queue and halt the decrementer. + */ + out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE | + MFC_CNTL_DECREMENTER_HALTED); + eieio(); +} + +static inline void wait_suspend_mfc_complete(struct spu_state *csa, + struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Restore, Step 8: + * Restore, Step 47. + * Poll MFC_CNTL[Ss] until 11 is returned. + */ + POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & + MFC_CNTL_SUSPEND_DMA_STATUS_MASK) == + MFC_CNTL_SUSPEND_COMPLETE); +} + +static inline int suspend_spe(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + + /* Restore, Step 9: + * If SPU_Status[R]=1, stop SPU execution + * and wait for stop to complete. + * + * Returns 1 if SPU_Status[R]=1 on entry. 
+ * 0 otherwise + */ + if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) { + if (in_be32(&prob->spu_status_R) & + SPU_STATUS_ISOLATED_EXIT_STATUS) { + POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & + SPU_STATUS_RUNNING); + } + if ((in_be32(&prob->spu_status_R) & + SPU_STATUS_ISOLATED_LOAD_STATUS) + || (in_be32(&prob->spu_status_R) & + SPU_STATUS_ISOLATED_STATE)) { + out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); + eieio(); + POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & + SPU_STATUS_RUNNING); + out_be32(&prob->spu_runcntl_RW, 0x2); + eieio(); + POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & + SPU_STATUS_RUNNING); + } + if (in_be32(&prob->spu_status_R) & + SPU_STATUS_WAITING_FOR_CHANNEL) { + out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); + eieio(); + POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & + SPU_STATUS_RUNNING); + } + return 1; + } + return 0; +} + +static inline void clear_spu_status(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + + /* Restore, Step 10: + * If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1, + * release SPU from isolate state. + */ + if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) { + if (in_be32(&prob->spu_status_R) & + SPU_STATUS_ISOLATED_EXIT_STATUS) { + spu_mfc_sr1_set(spu, + MFC_STATE1_MASTER_RUN_CONTROL_MASK); + eieio(); + out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); + eieio(); + POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & + SPU_STATUS_RUNNING); + } + if ((in_be32(&prob->spu_status_R) & + SPU_STATUS_ISOLATED_LOAD_STATUS) + || (in_be32(&prob->spu_status_R) & + SPU_STATUS_ISOLATED_STATE)) { + spu_mfc_sr1_set(spu, + MFC_STATE1_MASTER_RUN_CONTROL_MASK); + eieio(); + out_be32(&prob->spu_runcntl_RW, 0x2); + eieio(); + POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & + SPU_STATUS_RUNNING); + } + } +} + +static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + u64 ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL }; + u64 idx; + int i; + + /* Restore, Step 20: + */ + + /* Reset CH 1 */ + out_be64(&priv2->spu_chnlcntptr_RW, 1); + out_be64(&priv2->spu_chnldata_RW, 0UL); + + /* Reset the following CH: [0,3,4,24,25,27] */ + for (i = 0; i < ARRAY_SIZE(ch_indices); i++) { + idx = ch_indices[i]; + out_be64(&priv2->spu_chnlcntptr_RW, idx); + eieio(); + out_be64(&priv2->spu_chnldata_RW, 0UL); + out_be64(&priv2->spu_chnlcnt_RW, 0UL); + eieio(); + } +} + +static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL }; + u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL }; + u64 idx; + int i; + + /* Restore, Step 21: + * Reset the following CH: [21, 23, 28, 29, 30] + */ + for (i = 0; i < 5; i++) { + idx = ch_indices[i]; + out_be64(&priv2->spu_chnlcntptr_RW, idx); + eieio(); + out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]); + eieio(); + } +} + +static inline void setup_spu_status_part1(struct spu_state *csa, + struct spu *spu) +{ + u32 status_P = SPU_STATUS_STOPPED_BY_STOP; + u32 status_I = SPU_STATUS_INVALID_INSTR; + u32 status_H = SPU_STATUS_STOPPED_BY_HALT; + u32 status_S = SPU_STATUS_SINGLE_STEP; + u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR; + u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP; + u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT |SPU_STATUS_STOPPED_BY_STOP; + u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP |SPU_STATUS_INVALID_INSTR; + u32 status_code; + + /* Restore, Step 27: 
+ * If the CSA.SPU_Status[I,S,H,P]=1 then add the correct + * instruction sequence to the end of the SPU based restore + * code (after the "context restored" stop and signal) to + * restore the correct SPU status. + * + * NOTE: Rather than modifying the SPU executable, we + * instead add a new 'stopped_status' field to the + * LSCSA. The SPU-side restore reads this field and + * takes the appropriate action when exiting. + */ + + status_code = + (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF; + if ((csa->prob.spu_status_R & status_P_I) == status_P_I) { + + /* SPU_Status[P,I]=1 - Illegal Instruction followed + * by Stop and Signal instruction, followed by 'br -4'. + * + */ + csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I; + csa->lscsa->stopped_status.slot[1] = status_code; + + } else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) { + + /* SPU_Status[P,H]=1 - Halt Conditional, followed + * by Stop and Signal instruction, followed by + * 'br -4'. + */ + csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H; + csa->lscsa->stopped_status.slot[1] = status_code; + + } else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) { + + /* SPU_Status[S,P]=1 - Stop and Signal instruction + * followed by 'br -4'. + */ + csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P; + csa->lscsa->stopped_status.slot[1] = status_code; + + } else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) { + + /* SPU_Status[S,I]=1 - Illegal instruction followed + * by 'br -4'. + */ + csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I; + csa->lscsa->stopped_status.slot[1] = status_code; + + } else if ((csa->prob.spu_status_R & status_P) == status_P) { + + /* SPU_Status[P]=1 - Stop and Signal instruction + * followed by 'br -4'. + */ + csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P; + csa->lscsa->stopped_status.slot[1] = status_code; + + } else if ((csa->prob.spu_status_R & status_H) == status_H) { + + /* SPU_Status[H]=1 - Halt Conditional, followed + * by 'br -4'. + */ + csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H; + + } else if ((csa->prob.spu_status_R & status_S) == status_S) { + + /* SPU_Status[S]=1 - Two nop instructions. + */ + csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S; + + } else if ((csa->prob.spu_status_R & status_I) == status_I) { + + /* SPU_Status[I]=1 - Illegal instruction followed + * by 'br -4'. + */ + csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I; + + } +} + +static inline void setup_spu_status_part2(struct spu_state *csa, + struct spu *spu) +{ + u32 mask; + + /* Restore, Step 28: + * If the CSA.SPU_Status[I,S,H,P,R]=0 then + * add a 'br *' instruction to the end of + * the SPU based restore code. + * + * NOTE: Rather than modifying the SPU executable, we + * instead add a new 'stopped_status' field to the + * LSCSA. The SPU-side restore reads this field and + * takes the appropriate action when exiting. + */ + mask = SPU_STATUS_INVALID_INSTR | + SPU_STATUS_SINGLE_STEP | + SPU_STATUS_STOPPED_BY_HALT | + SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING; + if (!(csa->prob.spu_status_R & mask)) { + csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R; + } +} + +static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu) +{ + /* Restore, Step 29: + * Restore RA_GROUP_ID register and the + * RA_ENABLE reigster from the CSA. 
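+	 * (Both values are written back through the
+	 * spu_resource_allocation_*_set() calls below, mirroring
+	 * what save_mfc_rag captured in save step 38.)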
+ */ + spu_resource_allocation_groupID_set(spu, + csa->priv1.resource_allocation_groupID_RW); + spu_resource_allocation_enable_set(spu, + csa->priv1.resource_allocation_enable_RW); +} + +static inline void send_restore_code(struct spu_state *csa, struct spu *spu) +{ + unsigned long addr = (unsigned long)&spu_restore_code[0]; + unsigned int ls_offset = 0x0; + unsigned int size = sizeof(spu_restore_code); + unsigned int tag = 0; + unsigned int rclass = 0; + unsigned int cmd = MFC_GETFS_CMD; + + /* Restore, Step 37: + * Issue MFC DMA command to copy context + * restore code to local storage. + */ + send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); +} + +static inline void setup_decr(struct spu_state *csa, struct spu *spu) +{ + /* Restore, Step 34: + * If CSA.MFC_CNTL[Ds]=1 (decrementer was + * running) then adjust decrementer, set + * decrementer running status in LSCSA, + * and set decrementer "wrapped" status + * in LSCSA. + */ + if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) { + cycles_t resume_time = get_cycles(); + cycles_t delta_time = resume_time - csa->suspend_time; + + csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING; + if (csa->lscsa->decr.slot[0] < delta_time) { + csa->lscsa->decr_status.slot[0] |= + SPU_DECR_STATUS_WRAPPED; + } + + csa->lscsa->decr.slot[0] -= delta_time; + } else { + csa->lscsa->decr_status.slot[0] = 0; + } +} + +static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu) +{ + /* Restore, Step 35: + * Copy the CSA.PU_MB data into the LSCSA. + */ + csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R; +} + +static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu) +{ + /* Restore, Step 36: + * Copy the CSA.PUINT_MB data into the LSCSA. + */ + csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R; +} + +static inline int check_restore_status(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + u32 complete; + + /* Restore, Step 40: + * If SPU_Status[P]=1 and SPU_Status[SC] = "success", + * context restore succeeded, otherwise context restore + * failed. + */ + complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) | + SPU_STATUS_STOPPED_BY_STOP); + return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0; +} + +static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Restore, Step 41: + * Restore SPU_PrivCntl from the CSA. + */ + out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW); + eieio(); +} + +static inline void restore_status_part1(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + u32 mask; + + /* Restore, Step 42: + * If any CSA.SPU_Status[I,S,H,P]=1, then + * restore the error or single step state. + */ + mask = SPU_STATUS_INVALID_INSTR | + SPU_STATUS_SINGLE_STEP | + SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP; + if (csa->prob.spu_status_R & mask) { + out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); + eieio(); + POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & + SPU_STATUS_RUNNING); + } +} + +static inline void restore_status_part2(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + u32 mask; + + /* Restore, Step 43: + * If all CSA.SPU_Status[I,S,H,P,R]=0 then write + * SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1, + * then write '00' to SPU_RunCntl[R0R1] and wait + * for SPU_Status[R]=0. 
+ */ + mask = SPU_STATUS_INVALID_INSTR | + SPU_STATUS_SINGLE_STEP | + SPU_STATUS_STOPPED_BY_HALT | + SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING; + if (!(csa->prob.spu_status_R & mask)) { + out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); + eieio(); + POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) & + SPU_STATUS_RUNNING); + out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); + eieio(); + POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & + SPU_STATUS_RUNNING); + } +} + +static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu) +{ + unsigned long addr = (unsigned long)&csa->lscsa->ls[0]; + unsigned int ls_offset = 0x0; + unsigned int size = 16384; + unsigned int tag = 0; + unsigned int rclass = 0; + unsigned int cmd = MFC_GET_CMD; + + /* Restore, Step 44: + * Issue a DMA command to restore the first + * 16kb of local storage from CSA. + */ + send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); +} + +static inline void suspend_mfc(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Restore, Step 47. + * Write MFC_Cntl[Sc,Sm]='1','0' to suspend + * the queue. + */ + out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE); + eieio(); +} + +static inline void clear_interrupts(struct spu_state *csa, struct spu *spu) +{ + /* Restore, Step 49: + * Write INT_MASK_class0 with value of 0. + * Write INT_MASK_class1 with value of 0. + * Write INT_MASK_class2 with value of 0. + * Write INT_STAT_class0 with value of -1. + * Write INT_STAT_class1 with value of -1. + * Write INT_STAT_class2 with value of -1. + */ + spin_lock_irq(&spu->register_lock); + spu_int_mask_set(spu, 0, 0ul); + spu_int_mask_set(spu, 1, 0ul); + spu_int_mask_set(spu, 2, 0ul); + spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK); + spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK); + spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK); + spin_unlock_irq(&spu->register_lock); +} + +static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + int i; + + /* Restore, Step 50: + * If MFC_Cntl[Se]!=0 then restore + * MFC command queues. + */ + if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) { + for (i = 0; i < 8; i++) { + out_be64(&priv2->puq[i].mfc_cq_data0_RW, + csa->priv2.puq[i].mfc_cq_data0_RW); + out_be64(&priv2->puq[i].mfc_cq_data1_RW, + csa->priv2.puq[i].mfc_cq_data1_RW); + out_be64(&priv2->puq[i].mfc_cq_data2_RW, + csa->priv2.puq[i].mfc_cq_data2_RW); + out_be64(&priv2->puq[i].mfc_cq_data3_RW, + csa->priv2.puq[i].mfc_cq_data3_RW); + } + for (i = 0; i < 16; i++) { + out_be64(&priv2->spuq[i].mfc_cq_data0_RW, + csa->priv2.spuq[i].mfc_cq_data0_RW); + out_be64(&priv2->spuq[i].mfc_cq_data1_RW, + csa->priv2.spuq[i].mfc_cq_data1_RW); + out_be64(&priv2->spuq[i].mfc_cq_data2_RW, + csa->priv2.spuq[i].mfc_cq_data2_RW); + out_be64(&priv2->spuq[i].mfc_cq_data3_RW, + csa->priv2.spuq[i].mfc_cq_data3_RW); + } + } + eieio(); +} + +static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + + /* Restore, Step 51: + * Restore the PPU_QueryMask register from CSA. + */ + out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW); + eieio(); +} + +static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + + /* Restore, Step 52: + * Restore the PPU_QueryType register from CSA. 
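+	 * (Counterpart of save step 21; the value captured by
+	 * save_ppu_querytype is written back verbatim.)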
+ */ + out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW); + eieio(); +} + +static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Restore, Step 53: + * Restore the MFC_CSR_TSQ register from CSA. + */ + out_be64(&priv2->spu_tag_status_query_RW, + csa->priv2.spu_tag_status_query_RW); + eieio(); +} + +static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Restore, Step 54: + * Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2 + * registers from CSA. + */ + out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW); + out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW); + eieio(); +} + +static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Restore, Step 55: + * Restore the MFC_CSR_ATO register from CSA. + */ + out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW); +} + +static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu) +{ + /* Restore, Step 56: + * Restore the MFC_TCLASS_ID register from CSA. + */ + spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW); + eieio(); +} + +static inline void set_llr_event(struct spu_state *csa, struct spu *spu) +{ + u64 ch0_cnt, ch0_data; + u64 ch1_data; + + /* Restore, Step 57: + * Set the Lock Line Reservation Lost Event by: + * 1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1. + * 2. If CSA.SPU_Channel_0_Count=0 and + * CSA.SPU_Wr_Event_Mask[Lr]=1 and + * CSA.SPU_Event_Status[Lr]=0 then set + * CSA.SPU_Event_Status_Count=1. + */ + ch0_cnt = csa->spu_chnlcnt_RW[0]; + ch0_data = csa->spu_chnldata_RW[0]; + ch1_data = csa->spu_chnldata_RW[1]; + csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT; + if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) && + (ch1_data & MFC_LLR_LOST_EVENT)) { + csa->spu_chnlcnt_RW[0] = 1; + } +} + +static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu) +{ + /* Restore, Step 58: + * If the status of the CSA software decrementer + * "wrapped" flag is set, OR in a '1' to + * CSA.SPU_Event_Status[Tm]. + */ + if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED)) + return; + + if ((csa->spu_chnlcnt_RW[0] == 0) && + (csa->spu_chnldata_RW[1] & 0x20) && + !(csa->spu_chnldata_RW[0] & 0x20)) + csa->spu_chnlcnt_RW[0] = 1; + + csa->spu_chnldata_RW[0] |= 0x20; +} + +static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL }; + int i; + + /* Restore, Step 59: + * Restore the following CH: [0,3,4,24,25,27] + */ + for (i = 0; i < ARRAY_SIZE(ch_indices); i++) { + idx = ch_indices[i]; + out_be64(&priv2->spu_chnlcntptr_RW, idx); + eieio(); + out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]); + out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]); + eieio(); + } +} + +static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + u64 ch_indices[3] = { 9UL, 21UL, 23UL }; + u64 ch_counts[3] = { 1UL, 16UL, 1UL }; + u64 idx; + int i; + + /* Restore, Step 60: + * Restore the following CH: [9,21,23]. 
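+	 * (CH 21 gets the MFC_Cmd channel count saved by
+	 * save_mfc_cmd; CH 9 and CH 23 are simply reset to a
+	 * count of one, as the assignments below show.)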
+ */ + ch_counts[0] = 1UL; + ch_counts[1] = csa->spu_chnlcnt_RW[21]; + ch_counts[2] = 1UL; + for (i = 0; i < 3; i++) { + idx = ch_indices[i]; + out_be64(&priv2->spu_chnlcntptr_RW, idx); + eieio(); + out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]); + eieio(); + } +} + +static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Restore, Step 61: + * Restore the SPU_LSLR register from CSA. + */ + out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW); + eieio(); +} + +static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Restore, Step 62: + * Restore the SPU_Cfg register from CSA. + */ + out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW); + eieio(); +} + +static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu) +{ + /* Restore, Step 63: + * Restore PM_Trace_Tag_Wait_Mask from CSA. + * Not performed by this implementation. + */ +} + +static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + + /* Restore, Step 64: + * Restore SPU_NPC from CSA. + */ + out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW); + eieio(); +} + +static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + int i; + + /* Restore, Step 65: + * Restore MFC_RdSPU_MB from CSA. + */ + out_be64(&priv2->spu_chnlcntptr_RW, 29UL); + eieio(); + out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]); + for (i = 0; i < 4; i++) { + out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]); + } + eieio(); +} + +static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + + /* Restore, Step 66: + * If CSA.MB_Stat[P]=0 (mailbox empty) then + * read from the PPU_MB register. + */ + if ((csa->prob.mb_stat_R & 0xFF) == 0) { + in_be32(&prob->pu_mb_R); + eieio(); + } +} + +static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Restore, Step 66: + * If CSA.MB_Stat[I]=0 (mailbox empty) then + * read from the PPUINT_MB register. + */ + if ((csa->prob.mb_stat_R & 0xFF0000) == 0) { + in_be64(&priv2->puint_mb_R); + eieio(); + spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR); + eieio(); + } +} + +static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu) +{ + /* Restore, Step 69: + * Restore the MFC_SR1 register from CSA. + */ + spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW); + eieio(); +} + +static inline void set_int_route(struct spu_state *csa, struct spu *spu) +{ + struct spu_context *ctx = spu->ctx; + + spu_cpu_affinity_set(spu, ctx->last_ran); +} + +static inline void restore_other_spu_access(struct spu_state *csa, + struct spu *spu) +{ + /* Restore, Step 70: + * Restore other SPU mappings to this SPU. TBD. + */ +} + +static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + + /* Restore, Step 71: + * If CSA.SPU_Status[R]=1 then write + * SPU_RunCntl[R0R1]='01'. + */ + if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) { + out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); + eieio(); + } +} + +static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu) +{ + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Restore, Step 72: + * Restore the MFC_CNTL register for the CSA. 
+ */ + out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW); + eieio(); + + /* + * The queue is put back into the same state that was evident prior to + * the context switch. The suspend flag is added to the saved state in + * the csa, if the operational state was suspending or suspended. In + * this case, the code that suspended the mfc is responsible for + * continuing it. Note that SPE faults do not change the operational + * state of the spu. + */ +} + +static inline void enable_user_access(struct spu_state *csa, struct spu *spu) +{ + /* Restore, Step 73: + * Enable user-space access (if provided) to this + * SPU by mapping the virtual pages assigned to + * the SPU memory-mapped I/O (MMIO) for problem + * state. TBD. + */ +} + +static inline void reset_switch_active(struct spu_state *csa, struct spu *spu) +{ + /* Restore, Step 74: + * Reset the "context switch active" flag. + * Not performed by this implementation. + */ +} + +static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu) +{ + /* Restore, Step 75: + * Re-enable SPU interrupts. + */ + spin_lock_irq(&spu->register_lock); + spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW); + spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW); + spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW); + spin_unlock_irq(&spu->register_lock); +} + +static int quiece_spu(struct spu_state *prev, struct spu *spu) +{ + /* + * Combined steps 2-18 of SPU context save sequence, which + * quiesce the SPU state (disable SPU execution, MFC command + * queues, decrementer, SPU interrupts, etc.). + * + * Returns 0 on success. + * 2 if failed step 2. + * 6 if failed step 6. + */ + + if (check_spu_isolate(prev, spu)) { /* Step 2. */ + return 2; + } + disable_interrupts(prev, spu); /* Step 3. */ + set_watchdog_timer(prev, spu); /* Step 4. */ + inhibit_user_access(prev, spu); /* Step 5. */ + if (check_spu_isolate(prev, spu)) { /* Step 6. */ + return 6; + } + set_switch_pending(prev, spu); /* Step 7. */ + save_mfc_cntl(prev, spu); /* Step 8. */ + save_spu_runcntl(prev, spu); /* Step 9. */ + save_mfc_sr1(prev, spu); /* Step 10. */ + save_spu_status(prev, spu); /* Step 11. */ + save_mfc_stopped_status(prev, spu); /* Step 12. */ + halt_mfc_decr(prev, spu); /* Step 13. */ + save_timebase(prev, spu); /* Step 14. */ + remove_other_spu_access(prev, spu); /* Step 15. */ + do_mfc_mssync(prev, spu); /* Step 16. */ + issue_mfc_tlbie(prev, spu); /* Step 17. */ + handle_pending_interrupts(prev, spu); /* Step 18. */ + + return 0; +} + +static void save_csa(struct spu_state *prev, struct spu *spu) +{ + /* + * Combine steps 19-44 of SPU context save sequence, which + * save regions of the privileged & problem state areas. + */ + + save_mfc_queues(prev, spu); /* Step 19. */ + save_ppu_querymask(prev, spu); /* Step 20. */ + save_ppu_querytype(prev, spu); /* Step 21. */ + save_ppu_tagstatus(prev, spu); /* NEW. */ + save_mfc_csr_tsq(prev, spu); /* Step 22. */ + save_mfc_csr_cmd(prev, spu); /* Step 23. */ + save_mfc_csr_ato(prev, spu); /* Step 24. */ + save_mfc_tclass_id(prev, spu); /* Step 25. */ + set_mfc_tclass_id(prev, spu); /* Step 26. */ + save_mfc_cmd(prev, spu); /* Step 26a - moved from 44. */ + purge_mfc_queue(prev, spu); /* Step 27. */ + wait_purge_complete(prev, spu); /* Step 28. */ + setup_mfc_sr1(prev, spu); /* Step 30. */ + save_spu_npc(prev, spu); /* Step 31. */ + save_spu_privcntl(prev, spu); /* Step 32. */ + reset_spu_privcntl(prev, spu); /* Step 33. */ + save_spu_lslr(prev, spu); /* Step 34. 
*/ + reset_spu_lslr(prev, spu); /* Step 35. */ + save_spu_cfg(prev, spu); /* Step 36. */ + save_pm_trace(prev, spu); /* Step 37. */ + save_mfc_rag(prev, spu); /* Step 38. */ + save_ppu_mb_stat(prev, spu); /* Step 39. */ + save_ppu_mb(prev, spu); /* Step 40. */ + save_ppuint_mb(prev, spu); /* Step 41. */ + save_ch_part1(prev, spu); /* Step 42. */ + save_spu_mb(prev, spu); /* Step 43. */ + reset_ch(prev, spu); /* Step 45. */ +} + +static void save_lscsa(struct spu_state *prev, struct spu *spu) +{ + /* + * Perform steps 46-57 of SPU context save sequence, + * which save regions of the local store and register + * file. + */ + + resume_mfc_queue(prev, spu); /* Step 46. */ + /* Step 47. */ + setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code)); + set_switch_active(prev, spu); /* Step 48. */ + enable_interrupts(prev, spu); /* Step 49. */ + save_ls_16kb(prev, spu); /* Step 50. */ + set_spu_npc(prev, spu); /* Step 51. */ + set_signot1(prev, spu); /* Step 52. */ + set_signot2(prev, spu); /* Step 53. */ + send_save_code(prev, spu); /* Step 54. */ + set_ppu_querymask(prev, spu); /* Step 55. */ + wait_tag_complete(prev, spu); /* Step 56. */ + wait_spu_stopped(prev, spu); /* Step 57. */ +} + +static void force_spu_isolate_exit(struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + struct spu_priv2 __iomem *priv2 = spu->priv2; + + /* Stop SPE execution and wait for completion. */ + out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); + iobarrier_rw(); + POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); + + /* Restart SPE master runcntl. */ + spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK); + iobarrier_w(); + + /* Initiate isolate exit request and wait for completion. */ + out_be64(&priv2->spu_privcntl_RW, 4LL); + iobarrier_w(); + out_be32(&prob->spu_runcntl_RW, 2); + iobarrier_rw(); + POLL_WHILE_FALSE((in_be32(&prob->spu_status_R) + & SPU_STATUS_STOPPED_BY_STOP)); + + /* Reset load request to normal. */ + out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL); + iobarrier_w(); +} + +/** + * stop_spu_isolate + * Check SPU run-control state and force isolated + * exit function as necessary. + */ +static void stop_spu_isolate(struct spu *spu) +{ + struct spu_problem __iomem *prob = spu->problem; + + if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) { + /* The SPU is in isolated state; the only way + * to get it out is to perform an isolated + * exit (clean) operation. + */ + force_spu_isolate_exit(spu); + } +} + +static void harvest(struct spu_state *prev, struct spu *spu) +{ + /* + * Perform steps 2-25 of SPU context restore sequence, + * which resets an SPU either after a failed save, or + * when using SPU for first time. + */ + + disable_interrupts(prev, spu); /* Step 2. */ + inhibit_user_access(prev, spu); /* Step 3. */ + terminate_spu_app(prev, spu); /* Step 4. */ + set_switch_pending(prev, spu); /* Step 5. */ + stop_spu_isolate(spu); /* NEW. */ + remove_other_spu_access(prev, spu); /* Step 6. */ + suspend_mfc_and_halt_decr(prev, spu); /* Step 7. */ + wait_suspend_mfc_complete(prev, spu); /* Step 8. */ + if (!suspend_spe(prev, spu)) /* Step 9. */ + clear_spu_status(prev, spu); /* Step 10. */ + do_mfc_mssync(prev, spu); /* Step 11. */ + issue_mfc_tlbie(prev, spu); /* Step 12. */ + handle_pending_interrupts(prev, spu); /* Step 13. */ + purge_mfc_queue(prev, spu); /* Step 14. */ + wait_purge_complete(prev, spu); /* Step 15. */ + reset_spu_privcntl(prev, spu); /* Step 16. */ + reset_spu_lslr(prev, spu); /* Step 17. 
*/ + setup_mfc_sr1(prev, spu); /* Step 18. */ + spu_invalidate_slbs(spu); /* Step 19. */ + reset_ch_part1(prev, spu); /* Step 20. */ + reset_ch_part2(prev, spu); /* Step 21. */ + enable_interrupts(prev, spu); /* Step 22. */ + set_switch_active(prev, spu); /* Step 23. */ + set_mfc_tclass_id(prev, spu); /* Step 24. */ + resume_mfc_queue(prev, spu); /* Step 25. */ +} + +static void restore_lscsa(struct spu_state *next, struct spu *spu) +{ + /* + * Perform steps 26-40 of SPU context restore sequence, + * which restores regions of the local store and register + * file. + */ + + set_watchdog_timer(next, spu); /* Step 26. */ + setup_spu_status_part1(next, spu); /* Step 27. */ + setup_spu_status_part2(next, spu); /* Step 28. */ + restore_mfc_rag(next, spu); /* Step 29. */ + /* Step 30. */ + setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code)); + set_spu_npc(next, spu); /* Step 31. */ + set_signot1(next, spu); /* Step 32. */ + set_signot2(next, spu); /* Step 33. */ + setup_decr(next, spu); /* Step 34. */ + setup_ppu_mb(next, spu); /* Step 35. */ + setup_ppuint_mb(next, spu); /* Step 36. */ + send_restore_code(next, spu); /* Step 37. */ + set_ppu_querymask(next, spu); /* Step 38. */ + wait_tag_complete(next, spu); /* Step 39. */ + wait_spu_stopped(next, spu); /* Step 40. */ +} + +static void restore_csa(struct spu_state *next, struct spu *spu) +{ + /* + * Combine steps 41-76 of SPU context restore sequence, which + * restore regions of the privileged & problem state areas. + */ + + restore_spu_privcntl(next, spu); /* Step 41. */ + restore_status_part1(next, spu); /* Step 42. */ + restore_status_part2(next, spu); /* Step 43. */ + restore_ls_16kb(next, spu); /* Step 44. */ + wait_tag_complete(next, spu); /* Step 45. */ + suspend_mfc(next, spu); /* Step 46. */ + wait_suspend_mfc_complete(next, spu); /* Step 47. */ + issue_mfc_tlbie(next, spu); /* Step 48. */ + clear_interrupts(next, spu); /* Step 49. */ + restore_mfc_queues(next, spu); /* Step 50. */ + restore_ppu_querymask(next, spu); /* Step 51. */ + restore_ppu_querytype(next, spu); /* Step 52. */ + restore_mfc_csr_tsq(next, spu); /* Step 53. */ + restore_mfc_csr_cmd(next, spu); /* Step 54. */ + restore_mfc_csr_ato(next, spu); /* Step 55. */ + restore_mfc_tclass_id(next, spu); /* Step 56. */ + set_llr_event(next, spu); /* Step 57. */ + restore_decr_wrapped(next, spu); /* Step 58. */ + restore_ch_part1(next, spu); /* Step 59. */ + restore_ch_part2(next, spu); /* Step 60. */ + restore_spu_lslr(next, spu); /* Step 61. */ + restore_spu_cfg(next, spu); /* Step 62. */ + restore_pm_trace(next, spu); /* Step 63. */ + restore_spu_npc(next, spu); /* Step 64. */ + restore_spu_mb(next, spu); /* Step 65. */ + check_ppu_mb_stat(next, spu); /* Step 66. */ + check_ppuint_mb_stat(next, spu); /* Step 67. */ + spu_invalidate_slbs(spu); /* Modified Step 68. */ + restore_mfc_sr1(next, spu); /* Step 69. */ + set_int_route(next, spu); /* NEW */ + restore_other_spu_access(next, spu); /* Step 70. */ + restore_spu_runcntl(next, spu); /* Step 71. */ + restore_mfc_cntl(next, spu); /* Step 72. */ + enable_user_access(next, spu); /* Step 73. */ + reset_switch_active(next, spu); /* Step 74. */ + reenable_interrupts(next, spu); /* Step 75. */ +} + +static int __do_spu_save(struct spu_state *prev, struct spu *spu) +{ + int rc; + + /* + * SPU context save can be broken into three phases: + * + * (a) quiesce [steps 2-16]. + * (b) save of CSA, performed by PPE [steps 17-42] + * (c) save of LSCSA, mostly performed by SPU [steps 43-52]. + * + * Returns 0 on success. 
+ * 2,6 if failed to quiece SPU + * 53 if SPU-side of save failed. + */ + + rc = quiece_spu(prev, spu); /* Steps 2-16. */ + switch (rc) { + default: + case 2: + case 6: + harvest(prev, spu); + return rc; + break; + case 0: + break; + } + save_csa(prev, spu); /* Steps 17-43. */ + save_lscsa(prev, spu); /* Steps 44-53. */ + return check_save_status(prev, spu); /* Step 54. */ +} + +static int __do_spu_restore(struct spu_state *next, struct spu *spu) +{ + int rc; + + /* + * SPU context restore can be broken into three phases: + * + * (a) harvest (or reset) SPU [steps 2-24]. + * (b) restore LSCSA [steps 25-40], mostly performed by SPU. + * (c) restore CSA [steps 41-76], performed by PPE. + * + * The 'harvest' step is not performed here, but rather + * as needed below. + */ + + restore_lscsa(next, spu); /* Steps 24-39. */ + rc = check_restore_status(next, spu); /* Step 40. */ + switch (rc) { + default: + /* Failed. Return now. */ + return rc; + break; + case 0: + /* Fall through to next step. */ + break; + } + restore_csa(next, spu); + + return 0; +} + +/** + * spu_save - SPU context save, with locking. + * @prev: pointer to SPU context save area, to be saved. + * @spu: pointer to SPU iomem structure. + * + * Acquire locks, perform the save operation then return. + */ +int spu_save(struct spu_state *prev, struct spu *spu) +{ + int rc; + + acquire_spu_lock(spu); /* Step 1. */ + rc = __do_spu_save(prev, spu); /* Steps 2-53. */ + release_spu_lock(spu); + if (rc != 0 && rc != 2 && rc != 6) { + panic("%s failed on SPU[%d], rc=%d.\n", + __func__, spu->number, rc); + } + return 0; +} +EXPORT_SYMBOL_GPL(spu_save); + +/** + * spu_restore - SPU context restore, with harvest and locking. + * @new: pointer to SPU context save area, to be restored. + * @spu: pointer to SPU iomem structure. + * + * Perform harvest + restore, as we may not be coming + * from a previous successful save operation, and the + * hardware state is unknown. + */ +int spu_restore(struct spu_state *new, struct spu *spu) +{ + int rc; + + acquire_spu_lock(spu); + harvest(NULL, spu); + spu->slb_replace = 0; + rc = __do_spu_restore(new, spu); + release_spu_lock(spu); + if (rc) { + panic("%s failed on SPU[%d] rc=%d.\n", + __func__, spu->number, rc); + } + return rc; +} +EXPORT_SYMBOL_GPL(spu_restore); + +static void init_prob(struct spu_state *csa) +{ + csa->spu_chnlcnt_RW[9] = 1; + csa->spu_chnlcnt_RW[21] = 16; + csa->spu_chnlcnt_RW[23] = 1; + csa->spu_chnlcnt_RW[28] = 1; + csa->spu_chnlcnt_RW[30] = 1; + csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP; + csa->prob.mb_stat_R = 0x000400; +} + +static void init_priv1(struct spu_state *csa) +{ + /* Enable decode, relocate, tlbie response, master runcntl. */ + csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK | + MFC_STATE1_MASTER_RUN_CONTROL_MASK | + MFC_STATE1_PROBLEM_STATE_MASK | + MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK; + + /* Enable OS-specific set of interrupts. 
*/ + csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR | + CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR | + CLASS0_ENABLE_SPU_ERROR_INTR; + csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR | + CLASS1_ENABLE_STORAGE_FAULT_INTR; + csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR | + CLASS2_ENABLE_SPU_HALT_INTR | + CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR; +} + +static void init_priv2(struct spu_state *csa) +{ + csa->priv2.spu_lslr_RW = LS_ADDR_MASK; + csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE | + MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION | + MFC_CNTL_DMA_QUEUES_EMPTY_MASK; +} + +/** + * spu_alloc_csa - allocate and initialize an SPU context save area. + * + * Allocate and initialize the contents of an SPU context save area. + * This includes enabling address translation, interrupt masks, etc., + * as appropriate for the given OS environment. + * + * Note that storage for the 'lscsa' is allocated separately, + * as it is by far the largest of the context save regions, + * and may need to be pinned or otherwise specially aligned. + */ +int spu_init_csa(struct spu_state *csa) +{ + int rc; + + if (!csa) + return -EINVAL; + memset(csa, 0, sizeof(struct spu_state)); + + rc = spu_alloc_lscsa(csa); + if (rc) + return rc; + + spin_lock_init(&csa->register_lock); + + init_prob(csa); + init_priv1(csa); + init_priv2(csa); + + return 0; +} + +void spu_fini_csa(struct spu_state *csa) +{ + spu_free_lscsa(csa); +} diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c new file mode 100644 index 0000000000..157e046e6e --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/syscalls.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include + +#include + +#include "spufs.h" + +/** + * sys_spu_run - run code loaded into an SPU + * + * @unpc: next program counter for the SPU + * @ustatus: status of the SPU + * + * This system call transfers the control of execution of a + * user space thread to an SPU. It will return when the + * SPU has finished executing or when it hits an error + * condition and it will be interrupted if a signal needs + * to be delivered to a handler in user space. + * + * The next program counter is set to the passed value + * before the SPU starts fetching code and the user space + * pointer gets updated with the new value when returning + * from kernel space. + * + * The status value returned from spu_run reflects the + * value of the spu_status register after the SPU has stopped. 
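+ *
+ * A rough user-space sketch of the intended call sequence
+ * (illustration only, not part of this patch; error handling
+ * is omitted, the spufs mount point and context name are
+ * assumed, and raw syscall() is used since libc wrappers may
+ * not be available):
+ *
+ *	int ctx = syscall(__NR_spu_create, "/spu/myctx", 0, 0755);
+ *	__u32 npc = 0, status = 0;
+ *	... load the SPU program into local store, e.g. through
+ *	    the context's "mem" file ...
+ *	syscall(__NR_spu_run, ctx, &npc, &status);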
+ * + */ +static long do_spu_run(struct file *filp, + __u32 __user *unpc, + __u32 __user *ustatus) +{ + long ret; + struct spufs_inode_info *i; + u32 npc, status; + + ret = -EFAULT; + if (get_user(npc, unpc)) + goto out; + + /* check if this file was created by spu_create */ + ret = -EINVAL; + if (filp->f_op != &spufs_context_fops) + goto out; + + i = SPUFS_I(file_inode(filp)); + ret = spufs_run_spu(i->i_ctx, &npc, &status); + + if (put_user(npc, unpc)) + ret = -EFAULT; + + if (ustatus && put_user(status, ustatus)) + ret = -EFAULT; +out: + return ret; +} + +static long do_spu_create(const char __user *pathname, unsigned int flags, + umode_t mode, struct file *neighbor) +{ + struct path path; + struct dentry *dentry; + int ret; + + dentry = user_path_create(AT_FDCWD, pathname, &path, LOOKUP_DIRECTORY); + ret = PTR_ERR(dentry); + if (!IS_ERR(dentry)) { + ret = spufs_create(&path, dentry, flags, mode, neighbor); + done_path_create(&path, dentry); + } + + return ret; +} + +struct spufs_calls spufs_calls = { + .create_thread = do_spu_create, + .spu_run = do_spu_run, + .notify_spus_active = do_notify_spus_active, + .owner = THIS_MODULE, +#ifdef CONFIG_COREDUMP + .coredump_extra_notes_size = spufs_coredump_extra_notes_size, + .coredump_extra_notes_write = spufs_coredump_extra_notes_write, +#endif +}; -- cgit v1.2.3